repo_name
stringlengths 7
71
| file_path
stringlengths 5
118
| context
list | import_statement
stringlengths 45
12.5k
| token_num
int64 641
99.4k
| cropped_code
stringlengths 44
17k
| all_code
stringlengths 43
754k
| next_line
stringlengths 2
330
| gold_snippet_index
int64 0
68
| created_at
stringlengths 25
25
| level
stringclasses 9
values |
---|---|---|---|---|---|---|---|---|---|---|
CmosWolf1/Code_implementation_for_paper_SKZC | demo.py | [
{
"identifier": "VisualizationDemo",
"path": "diffusiondet/predictor.py",
"snippet": "class VisualizationDemo(object):\n def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):\n \"\"\"\n Args:\n cfg (CfgNode):\n instance_mode (ColorMode):\n parallel (bool): whether to run the model in different processes from visualization.\n Useful since the visualization logic can be slow.\n \"\"\"\n self.metadata = MetadataCatalog.get(\n cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else \"__unused\"\n )\n self.cpu_device = torch.device(\"cpu\")\n self.instance_mode = instance_mode\n\n self.parallel = parallel\n if parallel:\n num_gpu = torch.cuda.device_count()\n self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)\n else:\n self.predictor = DefaultPredictor(cfg)\n \n self.threshold = cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST # workaround\n\n def run_on_image(self, image):\n \"\"\"\n Args:\n image (np.ndarray): an image of shape (H, W, C) (in BGR order).\n This is the format used by OpenCV.\n\n Returns:\n predictions (dict): the output of the model.\n vis_output (VisImage): the visualized image output.\n \"\"\"\n vis_output = None\n predictions = self.predictor(image)\n # Filter\n instances = predictions['instances']\n new_instances = instances[instances.scores > self.threshold]\n predictions = {'instances': new_instances}\n # Convert image from OpenCV BGR format to Matplotlib RGB format.\n image = image[:, :, ::-1]\n visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)\n if \"panoptic_seg\" in predictions:\n panoptic_seg, segments_info = predictions[\"panoptic_seg\"]\n vis_output = visualizer.draw_panoptic_seg_predictions(\n panoptic_seg.to(self.cpu_device), segments_info\n )\n else:\n if \"sem_seg\" in predictions:\n vis_output = visualizer.draw_sem_seg(\n predictions[\"sem_seg\"].argmax(dim=0).to(self.cpu_device)\n )\n if \"instances\" in predictions:\n instances = predictions[\"instances\"].to(self.cpu_device)\n vis_output = visualizer.draw_instance_predictions(predictions=instances)\n\n return predictions, vis_output\n\n def _frame_from_video(self, video):\n while video.isOpened():\n success, frame = video.read()\n if success:\n yield frame\n else:\n break\n\n def run_on_video(self, video):\n \"\"\"\n Visualizes predictions on frames of the input video.\n\n Args:\n video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be\n either a webcam or a video file.\n\n Yields:\n ndarray: BGR visualizations of each video frame.\n \"\"\"\n video_visualizer = VideoVisualizer(self.metadata, self.instance_mode)\n\n def process_predictions(frame, predictions):\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n if \"panoptic_seg\" in predictions:\n panoptic_seg, segments_info = predictions[\"panoptic_seg\"]\n vis_frame = video_visualizer.draw_panoptic_seg_predictions(\n frame, panoptic_seg.to(self.cpu_device), segments_info\n )\n elif \"instances\" in predictions:\n predictions = predictions[\"instances\"].to(self.cpu_device)\n vis_frame = video_visualizer.draw_instance_predictions(frame, predictions)\n elif \"sem_seg\" in predictions:\n vis_frame = video_visualizer.draw_sem_seg(\n frame, predictions[\"sem_seg\"].argmax(dim=0).to(self.cpu_device)\n )\n\n # Converts Matplotlib RGB format to OpenCV BGR format\n vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR)\n return vis_frame\n\n frame_gen = self._frame_from_video(video)\n if self.parallel:\n buffer_size = self.predictor.default_buffer_size\n\n frame_data = deque()\n\n for cnt, frame in enumerate(frame_gen):\n frame_data.append(frame)\n self.predictor.put(frame)\n\n if cnt >= buffer_size:\n frame = frame_data.popleft()\n predictions = self.predictor.get()\n yield process_predictions(frame, predictions)\n\n while len(frame_data):\n frame = frame_data.popleft()\n predictions = self.predictor.get()\n yield process_predictions(frame, predictions)\n else:\n for frame in frame_gen:\n yield process_predictions(frame, self.predictor(frame))"
},
{
"identifier": "add_diffusiondet_config",
"path": "diffusiondet/config.py",
"snippet": "def add_diffusiondet_config(cfg):\n \"\"\"\n Add config for DiffusionDet\n \"\"\"\n cfg.MODEL.DiffusionDet = CN()\n cfg.MODEL.DiffusionDet.NUM_CLASSES = 80\n cfg.MODEL.DiffusionDet.NUM_PROPOSALS = 300\n\n # RCNN Head.\n cfg.MODEL.DiffusionDet.NHEADS = 8\n cfg.MODEL.DiffusionDet.DROPOUT = 0.0\n cfg.MODEL.DiffusionDet.DIM_FEEDFORWARD = 2048\n cfg.MODEL.DiffusionDet.ACTIVATION = 'relu'\n cfg.MODEL.DiffusionDet.HIDDEN_DIM = 256\n cfg.MODEL.DiffusionDet.NUM_CLS = 1\n cfg.MODEL.DiffusionDet.NUM_REG = 3\n cfg.MODEL.DiffusionDet.NUM_HEADS = 6\n\n # Dynamic Conv.\n cfg.MODEL.DiffusionDet.NUM_DYNAMIC = 2\n cfg.MODEL.DiffusionDet.DIM_DYNAMIC = 64\n\n # Loss.\n cfg.MODEL.DiffusionDet.CLASS_WEIGHT = 2.0\n cfg.MODEL.DiffusionDet.GIOU_WEIGHT = 2.0\n cfg.MODEL.DiffusionDet.L1_WEIGHT = 5.0\n cfg.MODEL.DiffusionDet.DEEP_SUPERVISION = True\n cfg.MODEL.DiffusionDet.NO_OBJECT_WEIGHT = 0.1\n\n # Focal Loss.\n cfg.MODEL.DiffusionDet.USE_FOCAL = True\n cfg.MODEL.DiffusionDet.USE_FED_LOSS = False\n cfg.MODEL.DiffusionDet.ALPHA = 0.25\n cfg.MODEL.DiffusionDet.GAMMA = 2.0\n cfg.MODEL.DiffusionDet.PRIOR_PROB = 0.01\n\n # Dynamic K\n cfg.MODEL.DiffusionDet.OTA_K = 5\n\n # Diffusion\n cfg.MODEL.DiffusionDet.SNR_SCALE = 2.0\n cfg.MODEL.DiffusionDet.SAMPLE_STEP = 1\n\n # Inference\n cfg.MODEL.DiffusionDet.USE_NMS = True\n\n # Swin Backbones\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.SIZE = 'B' # 'T', 'S', 'B'\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n cfg.MODEL.SWIN.OUT_FEATURES = (0, 1, 2, 3) # modify\n\n # Optimizer.\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 1.0\n\n # TTA.\n cfg.TEST.AUG.MIN_SIZES = (400, 500, 600, 640, 700, 900, 1000, 1100, 1200, 1300, 1400, 1800, 800)\n cfg.TEST.AUG.CVPODS_TTA = True\n cfg.TEST.AUG.SCALE_FILTER = True\n cfg.TEST.AUG.SCALE_RANGES = ([96, 10000], [96, 10000], \n [64, 10000], [64, 10000],\n [64, 10000], [0, 10000],\n [0, 10000], [0, 256],\n [0, 256], [0, 192],\n [0, 192], [0, 96],\n [0, 10000])"
},
{
"identifier": "DiffusionDetDatasetMapper",
"path": "diffusiondet/dataset_mapper.py",
"snippet": "class DiffusionDetDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by DiffusionDet.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n def __init__(self, cfg, is_train=True):\n if cfg.INPUT.CROP.ENABLED and is_train:\n self.crop_gen = [\n T.ResizeShortestEdge([400, 500, 600], sample_style=\"choice\"),\n T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE),\n ]\n else:\n self.crop_gen = None\n\n self.tfm_gens = build_transform_gen(cfg, is_train)\n logging.getLogger(__name__).info(\n \"Full TransformGens used in training: {}, crop: {}\".format(str(self.tfm_gens), str(self.crop_gen))\n )\n\n self.img_format = cfg.INPUT.FORMAT\n self.is_train = is_train\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n if self.crop_gen is None:\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n else:\n if np.random.rand() > 0.5:\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n else:\n image, transforms = T.apply_transform_gens(\n self.tfm_gens[:-1] + self.crop_gen + self.tfm_gens[-1:], image\n )\n\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"annotations\" in dataset_dict:\n # USER: Modify this if you want to keep them for some reason.\n for anno in dataset_dict[\"annotations\"]:\n anno.pop(\"segmentation\", None)\n anno.pop(\"keypoints\", None)\n\n # USER: Implement additional transformations if you have other types of data\n annos = [\n utils.transform_instance_annotations(obj, transforms, image_shape)\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n instances = utils.annotations_to_instances(annos, image_shape)\n dataset_dict[\"instances\"] = utils.filter_empty_instances(instances)\n return dataset_dict"
},
{
"identifier": "DiffusionDetWithTTA",
"path": "diffusiondet/test_time_augmentation.py",
"snippet": "class DiffusionDetWithTTA(GeneralizedRCNNWithTTA):\n \"\"\"\n A DiffusionDet with test-time augmentation enabled.\n Its :meth:`__call__` method has the same interface as :meth:`DiffusionDet.forward`.\n \"\"\"\n\n def __init__(self, cfg, model, tta_mapper=None, batch_size=3):\n \"\"\"\n Args:\n cfg (CfgNode):\n model (DiffusionDet): a DiffusionDet to apply TTA on.\n tta_mapper (callable): takes a dataset dict and returns a list of\n augmented versions of the dataset dict. Defaults to\n `DatasetMapperTTA(cfg)`.\n batch_size (int): batch the augmented images into this batch size for inference.\n \"\"\"\n # fix the issue: cannot assign module before Module.__init__() call\n nn.Module.__init__(self)\n if isinstance(model, DistributedDataParallel):\n model = model.module\n\n self.cfg = cfg.clone()\n self.model = model\n\n if tta_mapper is None:\n tta_mapper = DatasetMapperTTA(cfg)\n self.tta_mapper = tta_mapper\n self.batch_size = batch_size\n\n # cvpods tta.\n self.enable_cvpods_tta = cfg.TEST.AUG.CVPODS_TTA\n self.enable_scale_filter = cfg.TEST.AUG.SCALE_FILTER\n self.scale_ranges = cfg.TEST.AUG.SCALE_RANGES\n self.max_detection = cfg.MODEL.DiffusionDet.NUM_PROPOSALS\n\n def _batch_inference(self, batched_inputs, detected_instances=None):\n \"\"\"\n Execute inference on a list of inputs,\n using batch size = self.batch_size, instead of the length of the list.\n\n Inputs & outputs have the same format as :meth:`DiffusionDet.forward`\n \"\"\"\n if detected_instances is None:\n detected_instances = [None] * len(batched_inputs)\n\n factors = 2 if self.tta_mapper.flip else 1\n if self.enable_scale_filter:\n assert len(batched_inputs) == len(self.scale_ranges) * factors\n\n outputs = []\n inputs, instances = [], []\n for idx, input, instance in zip(count(), batched_inputs, detected_instances):\n inputs.append(input)\n instances.append(instance)\n if self.enable_cvpods_tta:\n output = self.model.forward(inputs, do_postprocess=False)[0]\n if self.enable_scale_filter:\n pred_boxes = output.get(\"pred_boxes\")\n keep = self.filter_boxes(pred_boxes.tensor, *self.scale_ranges[idx // factors])\n output = Instances(\n image_size=output.image_size,\n pred_boxes=Boxes(pred_boxes.tensor[keep]),\n pred_classes=output.pred_classes[keep],\n scores=output.scores[keep])\n outputs.extend([output])\n else:\n\n if len(inputs) == self.batch_size or idx == len(batched_inputs) - 1:\n outputs.extend(\n self.model.forward(\n inputs,\n do_postprocess=False,\n )\n )\n inputs, instances = [], []\n return outputs\n\n @staticmethod\n def filter_boxes(boxes, min_scale, max_scale):\n \"\"\"\n boxes: (N, 4) shape\n \"\"\"\n # assert boxes.mode == \"xyxy\"\n w = boxes[:, 2] - boxes[:, 0]\n h = boxes[:, 3] - boxes[:, 1]\n keep = (w * h > min_scale * min_scale) & (w * h < max_scale * max_scale)\n return keep\n\n def _inference_one_image(self, input):\n \"\"\"\n Args:\n input (dict): one dataset dict with \"image\" field being a CHW tensor\n\n Returns:\n dict: one output dict\n \"\"\"\n orig_shape = (input[\"height\"], input[\"width\"])\n augmented_inputs, tfms = self._get_augmented_inputs(input)\n # Detect boxes from all augmented versions\n all_boxes, all_scores, all_classes = self._get_augmented_boxes(augmented_inputs, tfms)\n # merge all detected boxes to obtain final predictions for boxes\n if self.enable_cvpods_tta:\n merged_instances = self._merge_detections_cvpods_tta(all_boxes, all_scores, all_classes, orig_shape)\n else:\n merged_instances = self._merge_detections(all_boxes, all_scores, all_classes, orig_shape)\n\n return {\"instances\": merged_instances}\n\n def _merge_detections(self, all_boxes, all_scores, all_classes, shape_hw):\n # select from the union of all results\n num_boxes = len(all_boxes)\n num_classes = self.cfg.MODEL.DiffusionDet.NUM_CLASSES\n # +1 because fast_rcnn_inference expects background scores as well\n all_scores_2d = torch.zeros(num_boxes, num_classes + 1, device=all_boxes.device)\n for idx, cls, score in zip(count(), all_classes, all_scores):\n all_scores_2d[idx, cls] = score\n\n merged_instances, _ = fast_rcnn_inference_single_image(\n all_boxes,\n all_scores_2d,\n shape_hw,\n 1e-8,\n self.cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,\n self.cfg.TEST.DETECTIONS_PER_IMAGE,\n )\n\n return merged_instances\n\n def _merge_detections_cvpods_tta(self, all_boxes, all_scores, all_classes, shape_hw):\n all_scores = torch.tensor(all_scores).to(all_boxes.device)\n all_classes = torch.tensor(all_classes).to(all_boxes.device)\n\n all_boxes, all_scores, all_classes = self.merge_result_from_multi_scales(\n all_boxes, all_scores, all_classes,\n nms_type=\"soft_vote\", vote_thresh=0.65,\n max_detection=self.max_detection\n )\n\n all_boxes = Boxes(all_boxes)\n all_boxes.clip(shape_hw)\n\n result = Instances(shape_hw)\n result.pred_boxes = all_boxes\n result.scores = all_scores\n result.pred_classes = all_classes.long()\n return result\n\n def merge_result_from_multi_scales(\n self, boxes, scores, labels, nms_type=\"soft-vote\", vote_thresh=0.65, max_detection=100\n ):\n boxes, scores, labels = self.batched_vote_nms(\n boxes, scores, labels, nms_type, vote_thresh\n )\n\n number_of_detections = boxes.shape[0]\n # Limit to max_per_image detections **over all classes**\n if number_of_detections > max_detection > 0:\n boxes = boxes[:max_detection]\n scores = scores[:max_detection]\n labels = labels[:max_detection]\n\n return boxes, scores, labels\n\n def batched_vote_nms(self, boxes, scores, labels, vote_type, vote_thresh=0.65):\n # apply per class level nms, add max_coordinates on boxes first, then remove it.\n labels = labels.float()\n max_coordinates = boxes.max() + 1\n offsets = labels.reshape(-1, 1) * max_coordinates\n boxes = boxes + offsets\n\n boxes, scores, labels = self.bbox_vote(boxes, scores, labels, vote_thresh, vote_type)\n boxes -= labels.reshape(-1, 1) * max_coordinates\n\n return boxes, scores, labels\n\n def bbox_vote(self, boxes, scores, labels, vote_thresh, vote_type=\"softvote\"):\n assert boxes.shape[0] == scores.shape[0] == labels.shape[0]\n det = torch.cat((boxes, scores.reshape(-1, 1), labels.reshape(-1, 1)), dim=1)\n\n vote_results = torch.zeros(0, 6, device=det.device)\n if det.numel() == 0:\n return vote_results[:, :4], vote_results[:, 4], vote_results[:, 5]\n\n order = scores.argsort(descending=True)\n det = det[order]\n\n while det.shape[0] > 0:\n # IOU\n area = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])\n xx1 = torch.max(det[0, 0], det[:, 0])\n yy1 = torch.max(det[0, 1], det[:, 1])\n xx2 = torch.min(det[0, 2], det[:, 2])\n yy2 = torch.min(det[0, 3], det[:, 3])\n w = torch.clamp(xx2 - xx1, min=0.)\n h = torch.clamp(yy2 - yy1, min=0.)\n inter = w * h\n iou = inter / (area[0] + area[:] - inter)\n\n # get needed merge det and delete these det\n merge_index = torch.where(iou >= vote_thresh)[0]\n vote_det = det[merge_index, :]\n det = det[iou < vote_thresh]\n\n if merge_index.shape[0] <= 1:\n vote_results = torch.cat((vote_results, vote_det), dim=0)\n else:\n if vote_type == \"soft_vote\":\n vote_det_iou = iou[merge_index]\n det_accu_sum = self.get_soft_dets_sum(vote_det, vote_det_iou)\n elif vote_type == \"vote\":\n det_accu_sum = self.get_dets_sum(vote_det)\n vote_results = torch.cat((vote_results, det_accu_sum), dim=0)\n\n order = vote_results[:, 4].argsort(descending=True)\n vote_results = vote_results[order, :]\n\n return vote_results[:, :4], vote_results[:, 4], vote_results[:, 5]\n\n @staticmethod\n def get_dets_sum(vote_det):\n vote_det[:, :4] *= vote_det[:, 4:5].repeat(1, 4)\n max_score = vote_det[:, 4].max()\n det_accu_sum = torch.zeros((1, 6), device=vote_det.device)\n det_accu_sum[:, :4] = torch.sum(vote_det[:, :4], dim=0) / torch.sum(vote_det[:, 4])\n det_accu_sum[:, 4] = max_score\n det_accu_sum[:, 5] = vote_det[0, 5]\n return det_accu_sum\n\n @staticmethod\n def get_soft_dets_sum(vote_det, vote_det_iou):\n soft_vote_det = vote_det.detach().clone()\n soft_vote_det[:, 4] *= (1 - vote_det_iou)\n\n INFERENCE_TH = 0.05\n soft_index = torch.where(soft_vote_det[:, 4] >= INFERENCE_TH)[0]\n soft_vote_det = soft_vote_det[soft_index, :]\n\n vote_det[:, :4] *= vote_det[:, 4:5].repeat(1, 4)\n max_score = vote_det[:, 4].max()\n det_accu_sum = torch.zeros((1, 6), device=vote_det.device)\n det_accu_sum[:, :4] = torch.sum(vote_det[:, :4], dim=0) / torch.sum(vote_det[:, 4])\n det_accu_sum[:, 4] = max_score\n det_accu_sum[:, 5] = vote_det[0, 5]\n\n if soft_vote_det.shape[0] > 0:\n det_accu_sum = torch.cat((det_accu_sum, soft_vote_det), dim=0)\n return det_accu_sum"
},
{
"identifier": "add_model_ema_configs",
"path": "diffusiondet/util/model_ema.py",
"snippet": "def add_model_ema_configs(_C):\n _C.MODEL_EMA = type(_C)()\n _C.MODEL_EMA.ENABLED = False\n _C.MODEL_EMA.DECAY = 0.999\n # use the same as MODEL.DEVICE when empty\n _C.MODEL_EMA.DEVICE = \"\"\n # When True, loading the ema weight to the model when eval_only=True in build_model()\n _C.MODEL_EMA.USE_EMA_WEIGHTS_FOR_EVAL_ONLY = False\n # when True, use YOLOX EMA: https://github.com/Megvii-BaseDetection/YOLOX/blob/main/yolox/utils/ema.py#L22\n _C.MODEL_EMA.YOLOX = False"
},
{
"identifier": "may_build_model_ema",
"path": "diffusiondet/util/model_ema.py",
"snippet": "def may_build_model_ema(cfg, model):\n if not cfg.MODEL_EMA.ENABLED:\n return\n model = _remove_ddp(model)\n assert not hasattr(\n model, \"ema_state\"\n ), \"Name `ema_state` is reserved for model ema.\"\n model.ema_state = EMAState()\n logger.info(\"Using Model EMA.\")"
},
{
"identifier": "may_get_ema_checkpointer",
"path": "diffusiondet/util/model_ema.py",
"snippet": "def may_get_ema_checkpointer(cfg, model):\n if not cfg.MODEL_EMA.ENABLED:\n return {}\n model = _remove_ddp(model)\n return {\"ema_state\": model.ema_state}"
},
{
"identifier": "EMAHook",
"path": "diffusiondet/util/model_ema.py",
"snippet": "class EMAHook(HookBase):\n def __init__(self, cfg, model):\n model = _remove_ddp(model)\n assert cfg.MODEL_EMA.ENABLED\n assert hasattr(\n model, \"ema_state\"\n ), \"Call `may_build_model_ema` first to initilaize the model ema\"\n self.model = model\n self.ema = self.model.ema_state\n self.device = cfg.MODEL_EMA.DEVICE or cfg.MODEL.DEVICE\n self.ema_updater = EMAUpdater(\n self.model.ema_state, decay=cfg.MODEL_EMA.DECAY, device=self.device, yolox=cfg.MODEL_EMA.YOLOX\n )\n\n def before_train(self):\n if self.ema.has_inited():\n self.ema.to(self.device)\n else:\n self.ema_updater.init_state(self.model)\n\n def after_train(self):\n pass\n\n def before_step(self):\n pass\n\n def after_step(self):\n if not self.model.train:\n return\n self.ema_updater.update(self.model)"
},
{
"identifier": "apply_model_ema_and_restore",
"path": "diffusiondet/util/model_ema.py",
"snippet": "@contextmanager\ndef apply_model_ema_and_restore(model, state=None):\n \"\"\"Apply ema stored in `model` to model and returns a function to restore\n the weights are applied\n \"\"\"\n model = _remove_ddp(model)\n\n if state is None:\n state = get_model_ema_state(model)\n\n old_state = EMAState.FromModel(model, state.device)\n state.apply_to(model)\n yield old_state\n old_state.apply_to(model)"
},
{
"identifier": "EMADetectionCheckpointer",
"path": "diffusiondet/util/model_ema.py",
"snippet": "class EMADetectionCheckpointer(DetectionCheckpointer):\n def resume_or_load(self, path: str, *, resume: bool = True) -> Dict[str, Any]:\n \"\"\"\n If `resume` is True, this method attempts to resume from the last\n checkpoint, if exists. Otherwise, load checkpoint from the given path.\n This is useful when restarting an interrupted training job.\n\n Args:\n path (str): path to the checkpoint.\n resume (bool): if True, resume from the last checkpoint if it exists\n and load the model together with all the checkpointables. Otherwise\n only load the model without loading any checkpointables.\n\n Returns:\n same as :meth:`load`.\n \"\"\"\n if resume and self.has_checkpoint():\n path = self.get_checkpoint_file()\n return self.load(path)\n else:\n # workaround `self.load`\n return self.load(path, checkpointables=None) # modify"
}
] | import argparse
import glob
import multiprocessing as mp
import numpy as np
import os
import tempfile
import time
import warnings
import cv2
import tqdm
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
from diffusiondet.predictor import VisualizationDemo
from diffusiondet import DiffusionDetDatasetMapper, add_diffusiondet_config, DiffusionDetWithTTA
from diffusiondet.util.model_ema import add_model_ema_configs, may_build_model_ema, may_get_ema_checkpointer, EMAHook, \
apply_model_ema_and_restore, EMADetectionCheckpointer | 6,691 | # Copyright (c) Facebook, Inc. and its affiliates.
# constants
WINDOW_NAME = "COCO detections"
def setup_cfg(args):
# load config from file and command-line arguments
cfg = get_cfg()
# To use demo for Panoptic-DeepLab, please uncomment the following two lines.
# from detectron2.projects.panoptic_deeplab import add_panoptic_deeplab_config # noqa
# add_panoptic_deeplab_config(cfg)
add_diffusiondet_config(cfg)
| # Copyright (c) Facebook, Inc. and its affiliates.
# constants
WINDOW_NAME = "COCO detections"
def setup_cfg(args):
# load config from file and command-line arguments
cfg = get_cfg()
# To use demo for Panoptic-DeepLab, please uncomment the following two lines.
# from detectron2.projects.panoptic_deeplab import add_panoptic_deeplab_config # noqa
# add_panoptic_deeplab_config(cfg)
add_diffusiondet_config(cfg) | add_model_ema_configs(cfg) | 4 | 2023-11-17 02:37:37+00:00 | 8k |
fg320/DEASC | examples/12A_5x1_farm_dyn_tuning_dataset_grouping.py | [
{
"identifier": "WfModel",
"path": "deasc/wf_model.py",
"snippet": "class WfModel:\n \"\"\"\n Class for wind farm modelling (Interface setup but not limited to FLORIS\n framework).\n \"\"\"\n\n def __init__(self, input_file, path):\n \"\"\"\n Initialise wind farm object by pointing towards an input file.\n (FLORIS interface object).\n\n Args\n ----\n input file:(FLORIS .json input file).\n \"\"\"\n # Read and initialize input file\n self.input_file = input_file\n self.interface = floris_input_handler(self.input_file, path)\n\n # Assign wind farm model proporties\n self.D, self.H_hub, self.n_turbs = floris_properties(self)\n\n def set_aligned_layout(self, n_row, n_col, spac_x, spac_y, coordinates=False):\n \"\"\"\n Modify farm layout in aligned wind turbines with constant spacing,\n differing only from rows to columns. Flow field is also reinitialized.\n\n Args\n ----\n n_row: (float) number of turbine rows\n n_col: (float) number of turbine columns\n spac_x: (float) WT diam normalized turbines distance in x direction\n spac_y: (float) WT diam normalized turbines distance in y direction\n coordinates: (bool, opt) False if no coordinates wanted.\n Default set to False.\n\n Returns\n -------\n if coordinates is False:\n None\n if coordinates is True:\n x-coordinates: (numpy array) turbines x-coordinates\n y-coordinates: (numpy array) turbines y-coordinates\n \"\"\"\n # Input type check\n if not all(isinstance(i, int) for i in [n_row, n_col]) or \\\n not all(isinstance(j, (int, float)) for j in [spac_x, spac_y]):\n err_msg = \"Incorrect input value types\"\n raise ValueError(err_msg)\n\n # Calculate new coordinate farm layout\n layout_x = []\n layout_y = []\n for i in range(int(n_row)):\n for j in range(int(n_col)):\n layout_x.append(i * spac_x * self.D)\n layout_y.append(j * spac_y * self.D)\n\n # Reinitialize wind farm object\n floris_reinitialise_layout(self, layout_x, layout_y)\n\n if coordinates:\n return (np.array(layout_x), np.array(layout_y))\n else:\n return None\n\n def set_HR_layout(self, coordinates=False):\n \"\"\"\n Set Horns Rev wind farm layout to wind farm object and\n returns turbines' x and y coordinates if coordinates=True.\n\n Args\n ----\n coordinates: (bool, opt) False if no coordinates wanted.\n Default set to False.\n\n Returns\n -------\n if coordinates is False:\n None\n if coordinates is True:\n x-coordinates: (numpy array) turbines x-coordinates\n y-coordinates: (numpy array) turbines y-coordinates\n \"\"\"\n # Vestas V80 2 MW diameter check\n if self.D != 80:\n warning = \"Rotor diameter not from the Vestas V80 2 MW turbine\"\n warnings.warn(warning, UserWarning)\n\n n_rows = 10\n n_cols = 8\n spac_x = 7\n spac_y = 7\n angle = 6\n layout_x = []\n layout_y = []\n for i in range(int(n_rows)):\n for j in range(int(n_cols)):\n layout_x.append((i * spac_x * self.D) -\n (np.sin(np.radians(angle)) * j * spac_y * self.D))\n layout_y.append(j * spac_y * self.D * np.cos(np.radians(angle)))\n\n # Reinitialize wind farm object\n floris_reinitialise_layout(self, layout_x, layout_y)\n\n if coordinates:\n return (np.array(layout_x), np.array(layout_y))\n else:\n return None\n\n def farm_eval(self, yaw=None, ws=None, wd=None, ti=None, shear=None):\n \"\"\"\n Calculate farm flow field for given wind farm layout and input conditions.\n Return main outputs, such as yaw angles, turbines power, farm power, etc.\n\n Args\n ----\n yaw: (list, optional) turbines yaw angles (deg). Default to None.\n ws: (float, optional) input wind speeds (m/s). Default to None.\n wd: (float, optional) input wind directions (deg). Default to None.\n ti: (float, optional) input turbulence intensity. Default to None.\n shear: (float, optional) shear exponent. Default to None.\n\n Returns\n -------\n wf_pow: (float) WF power (MWatts).\n wt_pow: (np.array) WTs power (MWatts).\n wt_ti: (list) WTs turbulence intensity.\n wt_yaw: (np.array) WTs yaw angles (deg).\n \"\"\"\n # Main wind farm calculation\n wf_pow, wt_pow, wt_ti, wt_yaw, _ = floris_farm_eval(self,\n yaw,\n ws,\n wd,\n ti,\n shear)\n\n return (wf_pow, wt_pow, wt_ti, wt_yaw)\n\n def pow_yaw_sweep_1var(self, layout, var_info):\n \"\"\"\n Return wind farm power for a single yaw variable, either a\n single turbine or a single row of turbines. Sweep by row not possible\n for not aligned \"custom\" layouts.\n\n Args\n ----\n layout: (tuple)\n row: (integer) number of farm rows\n cols: (integer) number of farm columns\n or string \"custom\"\n var_info: (tuple)\n var_type: (string) \"T\" for turbine,\n \"R\" for row (not for custom layouts)\n var: (integer) turbine or row number\n var_value: (list of floats) variable values\n\n Returns\n -------\n obj_out: tuple\n obj: (list) objective values\n obj_func: (string) objective function\n var_info: (tuple) see input\n model: (string) model name\n \"\"\"\n # Extract inputs and check inputs\n var_type, var, var_value = var_info\n if layout != \"custom\":\n rows, cols = layout\n if var_type == 'R' and layout == \"custom\":\n err_msg = \"Row not allowed for custom layouts\"\n raise ValueError(err_msg)\n if var_type == 'R' and var > rows:\n err_msg = \"Row specified not in farm\"\n raise ValueError(err_msg)\n if var_type == 'T' and var > self.n_turbs:\n err_msg = \"Turbine specified not in farm\"\n raise ValueError(err_msg)\n\n # Calculations\n yaw_angles = np.array(floris_current_yaw(self))\n wf_pow = []\n\n for yaw_change in var_value:\n if layout != \"custom\":\n rows, cols = layout\n if var_type == 'T':\n yaw_angles[(var-1)] = yaw_change\n elif var_type == 'R':\n idx_1 = var*cols\n idx_0 = idx_1-cols\n yaw_angles[idx_0:idx_1] = yaw_change\n else:\n err_msg = \"var_type either 'T' or 'R'\"\n raise ValueError(err_msg)\n\n wf_pow_single, _, _, _ = self.farm_eval(yaw=yaw_angles)\n wf_pow.append(wf_pow_single)\n\n obj_out = (wf_pow, 'Farm Power')\n var_info = (var_type, var, var_value)\n print(\"Function exploration complete\")\n\n return obj_out, var_info"
},
{
"identifier": "Tuning",
"path": "deasc/tuning.py",
"snippet": "class Tuning:\n \"\"\"\n Parameter tuning class for a low-fidelity model, where one or more\n parameters are tuned to higher fidelity power measurements. In particular,\n the RMSE is minimised for single turbine power measurements for a single or\n the sum of multiple atmospheric conditions. The wind farm layout is assumed fixed.\n \"\"\"\n\n def __init__(self,\n wf_model,\n variables_class_list,\n variables_names_list,\n variables_bounds_list,\n obj_func_name='RMSE',\n opt_method='SLSQP',\n opt_options=None\n ):\n \"\"\"\n Args\n ----\n wf_model : WfModel object (low-fidelity model)\n single WfModel object to tune\n variables_class_list: list of strings\n list of classes of parameters to tune, one per parameter\n variables_names_list : list of strings\n list of parameter names to tune\n variables_bounds_list : list of tuples\n list of parameter bounds, upper and lower limits for each parameter\n obj_func_name: string\n objective function. Default set to \"RMSE\"\n opt_method: string\n optimization method. Dafault set to \"SLSQP\" (\"TURBO_1\" also available)\n opt_options: dict\n optimizer options. Default set to None\n \"\"\"\n self.obj_func_dict = {'RMSE': self._tuning_rmse_function}\n self.opt_method_list = [\"SLSQP\", \"TURBO_1\"]\n self.opt_options_dict = {\"SLSQP\": {'maxiter': 100,\n 'disp': True,\n 'iprint': 2,\n 'ftol': 1e-12,\n 'eps': 0.1},\n \"TURBO_1\": {\"n_init\": 2*len(variables_names_list),\n \"max_evals\": 100,\n \"batch_size\": 1, # 1 = Serial\n \"verbose\": True,\n \"use_ard\": True,\n \"max_cholesky_size\": 2000,\n \"n_training_steps\": 50,\n \"min_cuda\": 1024,\n \"device\": \"cpu\",\n \"dtype\": \"float64\"}}\n self.tuning_optimizer_dict = {'SLSQP': self._tuning_optimizer_scipy,\n 'TURBO_1': self._tuning_optimizer_turbo_1}\n\n self.wf_model = wf_model\n self.variables_class_list = variables_class_list\n self.variables_names_list = variables_names_list\n self.variables_bounds_list = variables_bounds_list\n\n self.obj_func_name = obj_func_name\n self.obj_func = self.obj_func_dict[self.obj_func_name]\n self.opt_method = opt_method\n if opt_options == None:\n self.opt_options = self.opt_options_dict[self.opt_method]\n else:\n self.opt_options = opt_options\n self._tuning_optimizer = self.tuning_optimizer_dict[self.opt_method]\n\n self.tuning_data_received = False\n self.tuning_conditions_received = False\n\n print(\"\\nInitialised parameter tuning\")\n print(\"%i parameters to tune\" % (len(self.variables_names_list)))\n print(\"%s optimization method\" % (self.opt_method))\n\n def tuning_data(self, data_power_list):\n \"\"\"\n Provide training higher-fidelity data for parameter tuning.\n Limited to power of each turbine for each condition ('RMSE')\n\n Args\n ----\n data_power_list : list of lists\n For each condition:\n list of turbines power output ('RMSE')\n \"\"\"\n self.tuning_data_power_list = data_power_list\n self.tuning_data_received = True\n pass\n\n def tuning_conditions(self,\n yaw_angles_list,\n wind_directions_list,\n wind_speeds_list,\n turbulence_intensities_list,\n wind_shear_list):\n \"\"\"\n Define the wind farm conditions (yaw and atmospheric)\n of the higher-fidelity data.\n\n Args\n ----\n yaw_angles_list : list of lists\n For each condition, list of turbines yaw_angles\n wind_directions_list: list\n For each condtion, wind direction\n wind_speeds_list: list\n For each condtion, wind speed\n turbulence_intensities_list: list\n For each condtion, wind direction\n wind_shear_list: list\n For each condtion, wind shear\n \"\"\"\n self.yaw_angles_list = yaw_angles_list\n self.wind_directions_list = wind_directions_list\n self.wind_speeds_list = wind_speeds_list\n self.turbulence_intensities_list = turbulence_intensities_list\n self.wind_shear_list = wind_shear_list\n self.tuning_conditions_received = True\n pass\n\n def tune_parameters(self):\n \"\"\"\n Tune specified parameters of a WfModel object.\n Requires higher-fidelity tuning data and the related conditions to be\n previously specified (refer to Tuning methods: tuning_data and tuning_conditions).\n\n Returns\n -------\n wf_model_tuned: WfModel object\n WfModel object with parameters tuned\n wf_model_dict_opt: dictionary\n tuned WfModel object dictionary\n \"\"\"\n # Double check tuning data and conditions have been specified\n if self.tuning_data_received is False:\n err_msg = \"Tuning data not specified. Use tuning_data method.\"\n raise Exception(err_msg)\n if self.tuning_conditions_received is False:\n err_msg = \"Tuning conditions not specified. Use tuning_conditions method.\"\n raise Exception(err_msg)\n\n # Extract original wf_model object dictionary and print its parameters\n self.wf_model_dict_original = floris_extract_object_dict(self.wf_model)\n self.models_dict = floris_extract_models_dict(self.wf_model_dict_original)\n floris_print_params(self.wf_model_dict_original,\n self.models_dict,\n \"Original model parameters\")\n\n # Extract initial variable values and normalise them\n self.variables_init = self._wf_model_dict_to_variables(self.wf_model_dict_original,\n self.variables_class_list,\n self.variables_names_list)\n self.variables_init_norm = self._norm_variables(self.variables_init,\n self.variables_bounds_list)\n\n # Normalize variable bounds\n tmp = self.variables_bounds_list\n (self.variables_bounds_list_norm,\n self.variables_low_bound_list_norm,\n self.variables_upp_bound_list_norm) = self._norm_variables_bounds_lists(tmp)\n\n # Minimisation of error | Extract optimal variables\n self._tuning_optimizer()\n self.opt_variables = self._unnorm_variables(self.opt_variables_norm,\n self.variables_bounds_list)\n\n # Apply tuned parameters (opt_variables) to wf_model and print them\n self.wf_model_dict_opt = self._vars_to_wf_model_dict(self.wf_model_dict_original,\n self.variables_class_list,\n self.variables_names_list,\n self.opt_variables)\n self.wf_model = floris_param_change_object(self.wf_model, self.wf_model_dict_opt)\n floris_print_params(self.wf_model_dict_opt,\n self.models_dict,\n \"Optimal model parameters\")\n\n return self.wf_model, self.wf_model_dict_opt\n\n # %% Private methods\n\n def _wf_model_dict_to_variables(self, wf_model_dict, class_list, names_list):\n variables = []\n for i in range(len(names_list)):\n variable = floris_extract_parameter(wf_model_dict,\n class_list[i],\n names_list[i])\n variables.append(variable)\n return variables\n\n def _norm_variables(self, variables, variables_bounds_list):\n variables_norm = ([norm(variables[i],\n variables_bounds_list[i][0],\n variables_bounds_list[i][1])\n for i in range(len(variables))])\n return variables_norm\n\n def _norm_variables_bounds_lists(self, variables_bounds_list):\n variables_bounds_list_norm = []\n variables_low_bound_list_norm = []\n variables_upp_bound_list_norm = []\n for i, variable_bounds in enumerate(variables_bounds_list):\n lower_bound_norm = norm(variable_bounds[0],\n variable_bounds[0],\n variable_bounds[1])\n upper_bound_norm = norm(variable_bounds[1],\n variable_bounds[0],\n variable_bounds[1])\n bound_norm_tuple = (lower_bound_norm, upper_bound_norm)\n variables_bounds_list_norm.append(bound_norm_tuple)\n variables_low_bound_list_norm.append(lower_bound_norm)\n variables_upp_bound_list_norm.append(upper_bound_norm)\n return (variables_bounds_list_norm,\n np.array(variables_low_bound_list_norm),\n np.array(variables_upp_bound_list_norm))\n\n def _unnorm_variables(self, variables_norm, variables_bounds_list):\n variables = ([unnorm(variables_norm[i],\n variables_bounds_list[i][0],\n variables_bounds_list[i][1])\n for i in range(len(variables_norm))])\n return variables\n\n def _vars_to_wf_model_dict(self,\n wf_model_dict_original,\n variables_class_list,\n variables_names_list,\n variables):\n wf_model_dict_new = copy.deepcopy(wf_model_dict_original)\n for i in range(len(variables)):\n wf_model_dict_new = floris_param_change_object_dict(wf_model_dict_new,\n variables_class_list[i],\n variables_names_list[i],\n variables[i])\n return wf_model_dict_new\n\n def _tuning_optimizer_scipy(self):\n self.opt_results = minimize(self.obj_func,\n self.variables_init_norm,\n method=self.opt_method,\n bounds=self.variables_bounds_list_norm,\n options=self.opt_options)\n self.opt_variables_norm = self.opt_results.x\n\n def _tuning_optimizer_turbo_1(self):\n turbo_1 = Turbo1(f=self.obj_func,\n lb=self.variables_low_bound_list_norm,\n ub=self.variables_upp_bound_list_norm,\n **self.opt_options,\n )\n turbo_1.optimize()\n X = turbo_1.X # Evaluated points\n fX = turbo_1.fX # Observed values\n index_best = np.argmin(fX)\n f_best, x_best = fX[index_best], X[index_best, :]\n self.opt_variables_norm = x_best\n\n def _tuning_rmse_function(self, variables_norm):\n\n # Unnorm variables, create new wf_model dictionary\n variables = self._unnorm_variables(variables_norm, self.variables_bounds_list)\n wf_model_dict_new = self._vars_to_wf_model_dict(self.wf_model_dict_original,\n self.variables_class_list,\n self.variables_names_list,\n variables)\n\n # Create new wf_model object and reinitialize (atmospheric conditions set later)\n self.wf_model = floris_param_change_object(self.wf_model, wf_model_dict_new)\n\n rmse = 0\n for i in range(len(self.tuning_data_power_list)):\n\n # Calculate wind turbine power outputs with model to tune\n floris_reinitialise_atmosphere(self.wf_model,\n ws=self.wind_speeds_list[i],\n wd=self.wind_directions_list[i],\n ti=self.turbulence_intensities_list[i],\n shear=self.wind_shear_list[i])\n yaw_angles = np.array([float(item) for item in self.yaw_angles_list[i]])\n power_turbines = floris_calculate_turbine_power(self.wf_model, yaw_angles)\n\n # Calculate root mean squared error single condition\n error = 0\n for j in range(len(power_turbines)):\n error += (self.tuning_data_power_list[i][j]-power_turbines[j])**2\n rmse_single = error/len(power_turbines)\n\n # Calculate sum of root mean squared errors\n rmse += rmse_single\n\n return rmse"
},
{
"identifier": "yaw_permutations_grouping_0last",
"path": "deasc/utils.py",
"snippet": "def yaw_permutations_grouping_0last(groups,\n yaw_per_sweep,\n yaw_bounds,\n dims_per_groups,\n n_0last):\n \"\"\"\n Generate all possible yaw permutations for the groups of dimensions, where\n dimensions in the same groups are forced equal, and adds \"n\" additional dimensions\n at the end which are held at zero.\n\n Args\n ----\n groups: int\n number of groups.\n yaw_per_sweep: int\n number of equally-spaced yaw angles in the given range for each dimension.\n yaw-bounds: tuple\n upper and lower limit of the yaw sweep shared for all dimensions.\n groups: list of integers\n number of equal dimensions per group.\n n_0last: int\n additional dimensions to keep fixed at 0. These are added to the end.\n\n Returns\n -------\n unique_combinations: list of lists\n list of all possible yaw combinations.\n \"\"\"\n yaw_list_single = np.linspace(yaw_bounds[0], yaw_bounds[1], yaw_per_sweep).tolist()\n yaw_list = [yaw_list_single for _ in range(groups)]\n unique_combinations_ = list(itertools.product(*yaw_list))\n yaw_last = [0]*n_0last\n unique_combinations = []\n for combination_ in unique_combinations_:\n combination = []\n for i, group in enumerate(combination_):\n combination += [group]*dims_per_groups[i]\n combination += yaw_last\n unique_combinations.append(combination)\n return unique_combinations"
},
{
"identifier": "floris_extract_object_dict",
"path": "deasc/utils_floris.py",
"snippet": "def floris_extract_object_dict(wf_model):\n \"\"\"Extract and return the current FLORIS object dictionary.\"\"\"\n return wf_model.interface.floris.as_dict()"
},
{
"identifier": "floris_extract_parameter",
"path": "deasc/utils_floris.py",
"snippet": "def floris_extract_parameter(wf_model_dict, param_class, param_name):\n \"\"\"Extract and return the current parameter value of a FLORIS object parameter.\"\"\"\n models_dict = floris_extract_models_dict(wf_model_dict)\n return wf_model_dict['wake'][param_class][models_dict[param_class]][param_name]"
},
{
"identifier": "floris_param_change_object_dict",
"path": "deasc/utils_floris.py",
"snippet": "def floris_param_change_object_dict(wf_model_dict, param_class, param_name, param_value):\n \"\"\"\n Change FLORIS object with a new model parameter, return new FLORIS object dictionary.\n FLORIS object is not reinitialised (see function floris_parameter_change_object).\n \"\"\"\n wf_model_dict_new = copy.deepcopy(wf_model_dict)\n models_dict = floris_extract_models_dict(wf_model_dict_new)\n (wf_model_dict_new['wake'][param_class]\n [models_dict[param_class]][param_name]) = param_value\n return wf_model_dict_new"
},
{
"identifier": "floris_param_change_object",
"path": "deasc/utils_floris.py",
"snippet": "def floris_param_change_object(wf_model, wf_model_dict_new):\n \"\"\"Change FLORIS object with new object dictionary. Also reinitialise farm layout.\"\"\"\n x_reinit, y_reinit = wf_model.interface.get_turbine_layout()\n wf_model.interface = FI(wf_model_dict_new)\n wf_model.interface.reinitialize(layout_x=x_reinit, layout_y=y_reinit)\n return wf_model"
}
] | import numpy as np
import multiprocessing as mp
from deasc import WfModel
from deasc import Tuning
from deasc.utils import yaw_permutations_grouping_0last
from deasc.utils_floris import (
floris_extract_object_dict,
floris_extract_parameter,
floris_param_change_object_dict,
floris_param_change_object
) | 5,769 |
"""
This example shows how to create, in parallel, an optimal parameter dataset for Jensen
wake expansion parameter k tuned to GCH model power predictions using the grouping
approach for a 5x1 wind farm of NREL 5 MW turbines. The training conditions are defined
as the yaw permutations of the two most upstream groups, each of two turbines. For each
condition, parameter k is tuned on that single condition and added to the optimal
parameter dataset.
"""
# %% Parameter tuning function - Run a single optimisation for each trainign condition
def function(i, yaw, inflow, wt_pow_training_list):
# Extract inflow
wd, ws, ti, shear = inflow
# Initialise trainee and set farm layout
path = "./inputs/"
input_file_trainee = "jensen.yaml"
trainee = WfModel(input_file_trainee, path)
trainee.set_aligned_layout(5, 1, 7, 5)
# Set kd deflection parameter
trainee_dict = floris_extract_object_dict(trainee)
trainee_dict = floris_param_change_object_dict(trainee_dict,
'wake_deflection_parameters',
'kd',
0.3)
|
"""
This example shows how to create, in parallel, an optimal parameter dataset for Jensen
wake expansion parameter k tuned to GCH model power predictions using the grouping
approach for a 5x1 wind farm of NREL 5 MW turbines. The training conditions are defined
as the yaw permutations of the two most upstream groups, each of two turbines. For each
condition, parameter k is tuned on that single condition and added to the optimal
parameter dataset.
"""
# %% Parameter tuning function - Run a single optimisation for each trainign condition
def function(i, yaw, inflow, wt_pow_training_list):
# Extract inflow
wd, ws, ti, shear = inflow
# Initialise trainee and set farm layout
path = "./inputs/"
input_file_trainee = "jensen.yaml"
trainee = WfModel(input_file_trainee, path)
trainee.set_aligned_layout(5, 1, 7, 5)
# Set kd deflection parameter
trainee_dict = floris_extract_object_dict(trainee)
trainee_dict = floris_param_change_object_dict(trainee_dict,
'wake_deflection_parameters',
'kd',
0.3) | trainee = floris_param_change_object(trainee, trainee_dict) | 6 | 2023-11-10 18:13:27+00:00 | 8k |
CPES-Power-and-Energy-Systems/interoperable-recommender-tso | energy_app/packages/entsoe-py/entsoe/parsers.py | [
{
"identifier": "PSRTYPE_MAPPINGS",
"path": "energy_app/packages/entsoe-py/entsoe/mappings.py",
"snippet": "PSRTYPE_MAPPINGS = {\n 'A03': 'Mixed',\n 'A04': 'Generation',\n 'A05': 'Load',\n 'B01': 'Biomass',\n 'B02': 'Fossil Brown coal/Lignite',\n 'B03': 'Fossil Coal-derived gas',\n 'B04': 'Fossil Gas',\n 'B05': 'Fossil Hard coal',\n 'B06': 'Fossil Oil',\n 'B07': 'Fossil Oil shale',\n 'B08': 'Fossil Peat',\n 'B09': 'Geothermal',\n 'B10': 'Hydro Pumped Storage',\n 'B11': 'Hydro Run-of-river and poundage',\n 'B12': 'Hydro Water Reservoir',\n 'B13': 'Marine',\n 'B14': 'Nuclear',\n 'B15': 'Other renewable',\n 'B16': 'Solar',\n 'B17': 'Waste',\n 'B18': 'Wind Offshore',\n 'B19': 'Wind Onshore',\n 'B20': 'Other',\n 'B21': 'AC Link',\n 'B22': 'DC Link',\n 'B23': 'Substation',\n 'B24': 'Transformer'}"
},
{
"identifier": "DOCSTATUS",
"path": "energy_app/packages/entsoe-py/entsoe/mappings.py",
"snippet": "DOCSTATUS = {'A01': 'Intermediate',\n 'A02': 'Final',\n 'A05': 'Active',\n 'A09': 'Cancelled',\n 'A13': 'Withdrawn',\n 'X01': 'Estimated'}"
},
{
"identifier": "BSNTYPE",
"path": "energy_app/packages/entsoe-py/entsoe/mappings.py",
"snippet": "BSNTYPE = {'A29': 'Already allocated capacity (AAC)',\n 'A43': 'Requested capacity (without price)',\n 'A46': 'System Operator redispatching',\n 'A53': 'Planned maintenance',\n 'A54': 'Unplanned outage',\n 'A85': 'Internal redispatch',\n 'A95': 'Frequency containment reserve',\n 'A96': 'Automatic frequency restoration reserve',\n 'A97': 'Manual frequency restoration reserve',\n 'A98': 'Replacement reserve',\n 'B01': 'Interconnector network evolution',\n 'B02': 'Interconnector network dismantling',\n 'B03': 'Counter trade',\n 'B04': 'Congestion costs',\n 'B05': 'Capacity allocated (including price)',\n 'B07': 'Auction revenue',\n 'B08': 'Total nominated capacity',\n 'B09': 'Net position',\n 'B10': 'Congestion income',\n 'B11': 'Production unit'}"
},
{
"identifier": "Area",
"path": "energy_app/packages/entsoe-py/entsoe/mappings.py",
"snippet": "class Area(enum.Enum):\n \"\"\"\n ENUM containing 3 things about an Area: CODE, Meaning, Timezone\n \"\"\"\n def __new__(cls, *args, **kwds):\n obj = object.__new__(cls)\n obj._value_ = args[0]\n return obj\n\n # ignore the first param since it's already set by __new__\n def __init__(self, _: str, meaning: str, tz: str):\n self._meaning = meaning\n self._tz = tz\n\n def __str__(self):\n return self.value\n\n @property\n def meaning(self):\n return self._meaning\n\n @property\n def tz(self):\n return self._tz\n\n @property\n def code(self):\n return self.value\n\n # List taken directly from the API Docs\n DE_50HZ = '10YDE-VE-------2', '50Hertz CA, DE(50HzT) BZA', 'Europe/Berlin',\n AL = '10YAL-KESH-----5', 'Albania, OST BZ / CA / MBA', 'Europe/Tirane',\n DE_AMPRION = '10YDE-RWENET---I', 'Amprion CA', 'Europe/Berlin',\n AT = '10YAT-APG------L', 'Austria, APG BZ / CA / MBA', 'Europe/Vienna',\n BY = '10Y1001A1001A51S', 'Belarus BZ / CA / MBA', 'Europe/Minsk',\n BE = '10YBE----------2', 'Belgium, Elia BZ / CA / MBA', 'Europe/Brussels',\n BA = '10YBA-JPCC-----D', 'Bosnia Herzegovina, NOS BiH BZ / CA / MBA', 'Europe/Sarajevo',\n BG = '10YCA-BULGARIA-R', 'Bulgaria, ESO BZ / CA / MBA', 'Europe/Sofia',\n CZ_DE_SK = '10YDOM-CZ-DE-SKK', 'BZ CZ+DE+SK BZ / BZA', 'Europe/Prague',\n HR = '10YHR-HEP------M', 'Croatia, HOPS BZ / CA / MBA', 'Europe/Zagreb',\n CWE = '10YDOM-REGION-1V', 'CWE Region', 'Europe/Brussels',\n CY = '10YCY-1001A0003J', 'Cyprus, Cyprus TSO BZ / CA / MBA', 'Asia/Nicosia',\n CZ = '10YCZ-CEPS-----N', 'Czech Republic, CEPS BZ / CA/ MBA', 'Europe/Prague',\n DE_AT_LU = '10Y1001A1001A63L', 'DE-AT-LU BZ', 'Europe/Berlin',\n DE_LU = '10Y1001A1001A82H', 'DE-LU BZ / MBA', 'Europe/Berlin',\n DK = '10Y1001A1001A65H', 'Denmark', 'Europe/Copenhagen',\n DK_1 = '10YDK-1--------W', 'DK1 BZ / MBA', 'Europe/Copenhagen',\n DK_1_NO_1 = '46Y000000000007M', 'DK1 NO1 BZ', 'Europe/Copenhagen',\n DK_2 = '10YDK-2--------M', 'DK2 BZ / MBA', 'Europe/Copenhagen',\n DK_CA = '10Y1001A1001A796', 'Denmark, Energinet CA', 'Europe/Copenhagen',\n EE = '10Y1001A1001A39I', 'Estonia, Elering BZ / CA / MBA', 'Europe/Tallinn',\n FI = '10YFI-1--------U', 'Finland, Fingrid BZ / CA / MBA', 'Europe/Helsinki',\n MK = '10YMK-MEPSO----8', 'Former Yugoslav Republic of Macedonia, MEPSO BZ / CA / MBA', 'Europe/Skopje',\n FR = '10YFR-RTE------C', 'France, RTE BZ / CA / MBA', 'Europe/Paris',\n DE = '10Y1001A1001A83F', 'Germany', 'Europe/Berlin'\n GR = '10YGR-HTSO-----Y', 'Greece, IPTO BZ / CA/ MBA', 'Europe/Athens',\n HU = '10YHU-MAVIR----U', 'Hungary, MAVIR CA / BZ / MBA', 'Europe/Budapest',\n IS = 'IS', 'Iceland', 'Atlantic/Reykjavik',\n IE_SEM = '10Y1001A1001A59C', 'Ireland (SEM) BZ / MBA', 'Europe/Dublin',\n IE = '10YIE-1001A00010', 'Ireland, EirGrid CA', 'Europe/Dublin',\n IT = '10YIT-GRTN-----B', 'Italy, IT CA / MBA', 'Europe/Rome',\n IT_SACO_AC = '10Y1001A1001A885', 'Italy_Saco_AC', 'Europe/Rome',\n IT_CALA = '10Y1001C--00096J', 'IT-Calabria BZ', 'Europe/Rome',\n IT_SACO_DC = '10Y1001A1001A893', 'Italy_Saco_DC', 'Europe/Rome',\n IT_BRNN = '10Y1001A1001A699', 'IT-Brindisi BZ', 'Europe/Rome',\n IT_CNOR = '10Y1001A1001A70O', 'IT-Centre-North BZ', 'Europe/Rome',\n IT_CSUD = '10Y1001A1001A71M', 'IT-Centre-South BZ', 'Europe/Rome',\n IT_FOGN = '10Y1001A1001A72K', 'IT-Foggia BZ', 'Europe/Rome',\n IT_GR = '10Y1001A1001A66F', 'IT-GR BZ', 'Europe/Rome',\n IT_MACRO_NORTH = '10Y1001A1001A84D', 'IT-MACROZONE NORTH MBA', 'Europe/Rome',\n IT_MACRO_SOUTH = '10Y1001A1001A85B', 'IT-MACROZONE SOUTH MBA', 'Europe/Rome',\n IT_MALTA = '10Y1001A1001A877', 'IT-Malta BZ', 'Europe/Rome',\n IT_NORD = '10Y1001A1001A73I', 'IT-North BZ', 'Europe/Rome',\n IT_NORD_AT = '10Y1001A1001A80L', 'IT-North-AT BZ', 'Europe/Rome',\n IT_NORD_CH = '10Y1001A1001A68B', 'IT-North-CH BZ', 'Europe/Rome',\n IT_NORD_FR = '10Y1001A1001A81J', 'IT-North-FR BZ', 'Europe/Rome',\n IT_NORD_SI = '10Y1001A1001A67D', 'IT-North-SI BZ', 'Europe/Rome',\n IT_PRGP = '10Y1001A1001A76C', 'IT-Priolo BZ', 'Europe/Rome',\n IT_ROSN = '10Y1001A1001A77A', 'IT-Rossano BZ', 'Europe/Rome',\n IT_SARD = '10Y1001A1001A74G', 'IT-Sardinia BZ', 'Europe/Rome',\n IT_SICI = '10Y1001A1001A75E', 'IT-Sicily BZ', 'Europe/Rome',\n IT_SUD = '10Y1001A1001A788', 'IT-South BZ', 'Europe/Rome',\n RU_KGD = '10Y1001A1001A50U', 'Kaliningrad BZ / CA / MBA', 'Europe/Kaliningrad',\n LV = '10YLV-1001A00074', 'Latvia, AST BZ / CA / MBA', 'Europe/Riga',\n LT = '10YLT-1001A0008Q', 'Lithuania, Litgrid BZ / CA / MBA', 'Europe/Vilnius',\n LU = '10YLU-CEGEDEL-NQ', 'Luxembourg, CREOS CA', 'Europe/Luxembourg',\n LU_BZN = '10Y1001A1001A82H', 'Luxembourg', 'Europe/Luxembourg',\n MT = '10Y1001A1001A93C', 'Malta, Malta BZ / CA / MBA', 'Europe/Malta',\n ME = '10YCS-CG-TSO---S', 'Montenegro, CGES BZ / CA / MBA', 'Europe/Podgorica',\n GB = '10YGB----------A', 'National Grid BZ / CA/ MBA', 'Europe/London',\n GB_IFA = '10Y1001C--00098F', 'GB(IFA) BZN', 'Europe/London',\n GB_IFA2 = '17Y0000009369493', 'GB(IFA2) BZ', 'Europe/London',\n GB_ELECLINK = '11Y0-0000-0265-K', 'GB(ElecLink) BZN', 'Europe/London',\n UK = '10Y1001A1001A92E', 'United Kingdom', 'Europe/London',\n NL = '10YNL----------L', 'Netherlands, TenneT NL BZ / CA/ MBA', 'Europe/Amsterdam',\n NO_1 = '10YNO-1--------2', 'NO1 BZ / MBA', 'Europe/Oslo',\n NO_1A = '10Y1001A1001A64J', 'NO1 A BZ', 'Europe/Oslo',\n NO_2 = '10YNO-2--------T', 'NO2 BZ / MBA', 'Europe/Oslo',\n NO_2_NSL = '50Y0JVU59B4JWQCU', 'NO2 NSL BZ / MBA', 'Europe/Oslo',\n NO_2A = '10Y1001C--001219', 'NO2 A BZ', 'Europe/Oslo',\n NO_3 = '10YNO-3--------J', 'NO3 BZ / MBA', 'Europe/Oslo',\n NO_4 = '10YNO-4--------9', 'NO4 BZ / MBA', 'Europe/Oslo',\n NO_5 = '10Y1001A1001A48H', 'NO5 BZ / MBA', 'Europe/Oslo',\n NO = '10YNO-0--------C', 'Norway, Norway MBA, Stattnet CA', 'Europe/Oslo',\n PL_CZ = '10YDOM-1001A082L', 'PL-CZ BZA / CA', 'Europe/Warsaw',\n PL = '10YPL-AREA-----S', 'Poland, PSE SA BZ / BZA / CA / MBA', 'Europe/Warsaw',\n PT = '10YPT-REN------W', 'Portugal, REN BZ / CA / MBA', 'Europe/Lisbon',\n MD = '10Y1001A1001A990', 'Republic of Moldova, Moldelectica BZ/CA/MBA', 'Europe/Chisinau',\n RO = '10YRO-TEL------P', 'Romania, Transelectrica BZ / CA/ MBA', 'Europe/Bucharest',\n RU = '10Y1001A1001A49F', 'Russia BZ / CA / MBA', 'Europe/Moscow',\n SE_1 = '10Y1001A1001A44P', 'SE1 BZ / MBA', 'Europe/Stockholm',\n SE_2 = '10Y1001A1001A45N', 'SE2 BZ / MBA', 'Europe/Stockholm',\n SE_3 = '10Y1001A1001A46L', 'SE3 BZ / MBA', 'Europe/Stockholm',\n SE_4 = '10Y1001A1001A47J', 'SE4 BZ / MBA', 'Europe/Stockholm',\n RS = '10YCS-SERBIATSOV', 'Serbia, EMS BZ / CA / MBA', 'Europe/Belgrade',\n SK = '10YSK-SEPS-----K', 'Slovakia, SEPS BZ / CA / MBA', 'Europe/Bratislava',\n SI = '10YSI-ELES-----O', 'Slovenia, ELES BZ / CA / MBA', 'Europe/Ljubljana',\n GB_NIR = '10Y1001A1001A016', 'Northern Ireland, SONI CA', 'Europe/Belfast',\n ES = '10YES-REE------0', 'Spain, REE BZ / CA / MBA', 'Europe/Madrid',\n SE = '10YSE-1--------K', 'Sweden, Sweden MBA, SvK CA', 'Europe/Stockholm',\n CH = '10YCH-SWISSGRIDZ', 'Switzerland, Swissgrid BZ / CA / MBA', 'Europe/Zurich',\n DE_TENNET = '10YDE-EON------1', 'TenneT GER CA', 'Europe/Berlin',\n DE_TRANSNET = '10YDE-ENBW-----N', 'TransnetBW CA', 'Europe/Berlin',\n TR = '10YTR-TEIAS----W', 'Turkey BZ / CA / MBA', 'Europe/Istanbul',\n UA = '10Y1001C--00003F', 'Ukraine, Ukraine BZ, MBA', 'Europe/Kiev',\n UA_DOBTPP = '10Y1001A1001A869', 'Ukraine-DobTPP CTA', 'Europe/Kiev',\n UA_BEI = '10YUA-WEPS-----0', 'Ukraine BEI CTA', 'Europe/Kiev',\n UA_IPS = '10Y1001C--000182', 'Ukraine IPS CTA', 'Europe/Kiev',\n XK = '10Y1001C--00100H', 'Kosovo/ XK CA / XK BZN', 'Europe/Rome'"
}
] | import sys
import zipfile
import warnings
import bs4
import pandas as pd
from io import BytesIO
from typing import Union
from bs4.builder import XMLParsedAsHTMLWarning
from .mappings import PSRTYPE_MAPPINGS, DOCSTATUS, BSNTYPE, Area | 6,229 | series = series.sort_index()
series.index = _parse_datetimeindex(soup, tz)
return series
def _parse_installed_capacity_per_plant(soup):
"""
Parameters
----------
soup : bs4.element.tag
Returns
-------
pd.Series
"""
extract_vals = {'Name': 'registeredresource.name',
'Production Type': 'psrtype',
'Bidding Zone': 'inbiddingzone_domain.mrid',
# 'Status': 'businesstype',
'Voltage Connection Level [kV]':
'voltage_powersystemresources.highvoltagelimit'}
series = pd.Series(extract_vals).apply(lambda v: soup.find(v).text)
# extract only first point
series['Installed Capacity [MW]'] = \
soup.find_all('point')[0].find('quantity').text
series.name = soup.find('registeredresource.mrid').text
return series
def _parse_datetimeindex(soup, tz=None):
"""
Create a datetimeindex from a parsed beautifulsoup,
given that it contains the elements 'start', 'end'
and 'resolution'
Parameters
----------
soup : bs4.element.tag
tz: str
Returns
-------
pd.DatetimeIndex
"""
start = pd.Timestamp(soup.find('start').text)
end = pd.Timestamp(soup.find_all('end')[-1].text)
if tz is not None:
start = start.tz_convert(tz)
end = end.tz_convert(tz)
delta = _resolution_to_timedelta(res_text=soup.find('resolution').text)
index = pd.date_range(start=start, end=end, freq=delta, inclusive='left')
if tz is not None:
dst_jump = len(set(index.map(lambda d: d.dst()))) > 1
if dst_jump and delta == "7D":
# For a weekly granularity, if we jump over the DST date in October,
# date_range erronously returns an additional index element
# because that week contains 169 hours instead of 168.
index = index[:-1]
index = index.tz_convert("UTC")
return index
def _parse_crossborder_flows_timeseries(soup):
"""
Parameters
----------
soup : bs4.element.tag
Returns
-------
pd.Series
"""
positions = []
flows = []
for point in soup.find_all('point'):
positions.append(int(point.find('position').text))
flows.append(float(point.find('quantity').text))
series = pd.Series(index=positions, data=flows)
series = series.sort_index()
try:
series.index = _parse_datetimeindex(soup)
except ValueError as ex:
if "Length mismatch" in str(ex):
series.index = _parse_datetimeindex(soup)[:-1]
return series
def _resolution_to_timedelta(res_text: str) -> str:
"""
Convert an Entsoe resolution to something that pandas can understand
"""
resolutions = {
'PT60M': '60min',
'P1Y': '12M',
'PT15M': '15min',
'PT30M': '30min',
'P1D': '1D',
'P7D': '7D',
'P1M': '1M',
}
delta = resolutions.get(res_text)
if delta is None:
raise NotImplementedError("Sorry, I don't know what to do with the "
"resolution '{}', because there was no "
"documentation to be found of this format. "
"Everything is hard coded. Please open an "
"issue.".format(res_text))
return delta
# Define inverse bidding zone dico to look up bidding zone labels from the
# domain code in the unavailibility parsers:
|
warnings.filterwarnings('ignore', category=XMLParsedAsHTMLWarning)
GENERATION_ELEMENT = "inBiddingZone_Domain.mRID"
CONSUMPTION_ELEMENT = "outBiddingZone_Domain.mRID"
def _extract_timeseries(xml_text):
"""
Parameters
----------
xml_text : str
Yields
-------
bs4.element.tag
"""
if not xml_text:
return
soup = bs4.BeautifulSoup(xml_text, 'html.parser')
for timeseries in soup.find_all('timeseries'):
yield timeseries
def parse_prices(xml_text):
"""
Parameters
----------
xml_text : str
Returns
-------
pd.Series
"""
series = {
'15T': [],
'30T': [],
'60T': []
}
for soup in _extract_timeseries(xml_text):
soup_series = _parse_price_timeseries(soup)
series[soup_series.index.freqstr].append(soup_series)
for freq, freq_series in series.items():
if len(freq_series) > 0:
series[freq] = pd.concat(freq_series).sort_index()
return series
def parse_netpositions(xml_text):
"""
Parameters
----------
xml_text : str
Returns
-------
pd.Series
"""
series = []
for soup in _extract_timeseries(xml_text):
series.append(_parse_netposition_timeseries(soup))
series = pd.concat(series)
series = series.sort_index()
return series
def parse_loads(xml_text, process_type='A01'):
"""
Parameters
----------
xml_text : str
Returns
-------
pd.DataFrame
"""
if process_type == 'A01' or process_type == 'A16':
series = []
for soup in _extract_timeseries(xml_text):
series.append(_parse_load_timeseries(soup))
series = pd.concat(series)
series = series.sort_index()
return pd.DataFrame({
'Forecasted Load' if process_type == 'A01' else 'Actual Load': series
})
else:
series_min = pd.Series(dtype='object')
series_max = pd.Series(dtype='object')
for soup in _extract_timeseries(xml_text):
t = _parse_load_timeseries(soup)
if soup.find('businesstype').text == 'A60':
series_min = series_min.append(t)
elif soup.find('businesstype').text == 'A61':
series_max = series_max.append(t)
else:
continue
return pd.DataFrame({
'Min Forecasted Load': series_min,
'Max Forecasted Load': series_max
})
def parse_generation(
xml_text: str,
per_plant: bool = False,
include_eic: bool = False,
nett: bool = False) -> Union[pd.DataFrame, pd.Series]:
"""
Parameters
----------
xml_text : str
per_plant : bool
Decide if you need the parser that can extract plant info as well.
nett : bool
If you want to condense generation and consumption of a plant into a
nett number
include_eic: bool
If you want to include the eic code of a plan in the output
Returns
-------
pd.DataFrame | pd.Series
"""
all_series = dict()
for soup in _extract_timeseries(xml_text):
ts = _parse_generation_timeseries(soup, per_plant=per_plant, include_eic=include_eic)
# check if we already have a series of this name
series = all_series.get(ts.name)
if series is None:
# If not, we just save ts
all_series[ts.name] = ts
else:
# If yes, we extend it
series = pd.concat([series, ts])
series.sort_index(inplace=True)
all_series[series.name] = series
# drop duplicates in all series
for name in all_series:
ts = all_series[name]
all_series[name] = ts[~ts.index.duplicated(keep='first')]
df = pd.DataFrame.from_dict(all_series)
df.sort_index(inplace=True)
df = _calc_nett_and_drop_redundant_columns(df, nett=nett)
return df
def _calc_nett_and_drop_redundant_columns(
df: pd.DataFrame, nett: bool) -> pd.DataFrame:
def _calc_nett(_df):
try:
if set(['Actual Aggregated']).issubset(_df):
if set(['Actual Consumption']).issubset(_df):
_new = _df['Actual Aggregated'].fillna(0) - _df[
'Actual Consumption'].fillna(0)
else:
_new = _df['Actual Aggregated'].fillna(0)
else:
_new = -_df['Actual Consumption'].fillna(0)
except KeyError:
print ('Netting production and consumption not possible. Column not found')
return _new
if hasattr(df.columns, 'levels'):
if len(df.columns.levels[-1]) == 1:
# Drop the extra header, if it is redundant
df = df.droplevel(axis=1, level=-1)
elif nett:
frames = []
for column in df.columns.levels[-2]:
new = _calc_nett(df[column])
new.name = column
frames.append(new)
df = pd.concat(frames, axis=1)
else:
if nett:
df = _calc_nett(df)
elif len(df.columns) == 1:
df = df.squeeze()
return df
def parse_installed_capacity_per_plant(xml_text):
"""
Parameters
----------
xml_text : str
Returns
-------
pd.DataFrame
"""
all_series = {}
for soup in _extract_timeseries(xml_text):
s = _parse_installed_capacity_per_plant(soup)
series = all_series.get(s.name)
if series is None:
all_series[s.name] = s
else:
series = pd.concat([series, s])
series.sort_index()
all_series[series.name] = series
for name in all_series:
ts = all_series[name]
all_series[name] = ts[~ts.index.duplicated(keep='first')]
df = pd.DataFrame.from_dict(all_series).T
df['Production Type'] = df['Production Type'].map(PSRTYPE_MAPPINGS)
df['Name'] = df['Name'].str.encode('latin-1').str.decode('utf-8')
# df['Status'] = df['Status'].map(BSNTYPE)
return df
def parse_water_hydro(xml_text, tz):
"""
Parameters
----------
xml_text : str
Returns
-------
pd.Series
"""
all_series = []
for soup in _extract_timeseries(xml_text):
all_series.append(_parse_water_hydro_timeseries(soup, tz=tz))
series = pd.concat(all_series)
return series
def parse_crossborder_flows(xml_text):
"""
Parameters
----------
xml_text : str
Returns
-------
pd.Series
"""
series = []
for soup in _extract_timeseries(xml_text):
series.append(_parse_crossborder_flows_timeseries(soup))
series = pd.concat(series)
series = series.sort_index()
return series
def parse_imbalance_prices(xml_text):
"""
Parameters
----------
xml_text : str
Returns
-------
pd.DataFrame
"""
timeseries_blocks = _extract_timeseries(xml_text)
frames = (_parse_imbalance_prices_timeseries(soup)
for soup in timeseries_blocks)
df = pd.concat(frames, axis=1)
df = df.stack().unstack() # ad-hoc fix to prevent column splitting by NaNs
df.sort_index(inplace=True)
return df
def parse_imbalance_volumes(xml_text):
"""
Parameters
----------
xml_text : str
Returns
-------
pd.DataFrame
"""
timeseries_blocks = _extract_timeseries(xml_text)
frames = (_parse_imbalance_volumes_timeseries(soup)
for soup in timeseries_blocks)
df = pd.concat(frames, axis=1)
df = df.stack().unstack() # ad-hoc fix to prevent column splitting by NaNs
df.sort_index(inplace=True)
return df
def parse_procured_balancing_capacity(xml_text, tz):
"""
Parameters
----------
xml_text : str
tz: str
Returns
-------
pd.DataFrame
"""
timeseries_blocks = _extract_timeseries(xml_text)
frames = (_parse_procured_balancing_capacity(soup, tz)
for soup in timeseries_blocks)
df = pd.concat(frames, axis=1)
df.sort_index(axis=0, inplace=True)
df.sort_index(axis=1, inplace=True)
return df
def _parse_procured_balancing_capacity(soup, tz):
"""
Parameters
----------
soup : bs4.element.tag
tz: str
Returns
-------
pd.DataFrame
"""
direction = {
'A01': 'Up',
'A02': 'Down'
}
flow_direction = direction[soup.find('flowdirection.direction').text]
period = soup.find('period')
start = pd.to_datetime(period.find('timeinterval').find('start').text)
end = pd.to_datetime(period.find('timeinterval').find('end').text)
resolution = _resolution_to_timedelta(period.find('resolution').text)
tx = pd.date_range(start=start, end=end, freq=resolution, inclusive='left')
points = period.find_all('point')
df = pd.DataFrame(index=tx, columns=['Price', 'Volume'])
for dt, point in zip(tx, points):
df.loc[dt, 'Price'] = float(point.find('procurement_price.amount').text)
df.loc[dt, 'Volume'] = float(point.find('quantity').text)
mr_id = int(soup.find('mrid').text)
df.columns = pd.MultiIndex.from_product(
[[flow_direction], [mr_id], df.columns],
names=('direction', 'mrid', 'unit')
)
return df
def parse_contracted_reserve(xml_text, tz, label):
"""
Parameters
----------
xml_text : str
tz: str
label: str
Returns
-------
pd.DataFrame
"""
timeseries_blocks = _extract_timeseries(xml_text)
frames = (_parse_contracted_reserve_series(soup, tz, label)
for soup in timeseries_blocks)
df = pd.concat(frames, axis=1)
# Ad-hoc fix to prevent that columns are split by NaNs:
df = df.groupby(axis=1, level = [0,1]).mean()
df.sort_index(inplace=True)
return df
def _parse_contracted_reserve_series(soup, tz, label):
"""
Parameters
----------
soup : bs4.element.tag
tz: str
label: str
Returns
-------
pd.Series
"""
positions = []
prices = []
for point in soup.find_all('point'):
positions.append(int(point.find('position').text))
prices.append(float(point.find(label).text))
df = pd.DataFrame(data={'position': positions,
label: prices})
df = df.set_index(['position'])
df.sort_index(inplace=True)
index = _parse_datetimeindex(soup, tz)
if len(index) > len(df.index):
print("Shortening index", file=sys.stderr)
df.index = index[:len(df.index)]
else:
df.index = index
df.index.name = None
df.columns.name = None
direction_dico = {'A01': 'Up',
'A02': 'Down',
'A03': 'Symmetric'}
# First column level: the type of reserve
reserve_type = BSNTYPE[soup.find("businesstype").text]
df.rename(columns={label: reserve_type}, inplace=True)
# Second column level: the flow direction
direction = direction_dico[soup.find("flowdirection.direction").text]
df.columns = pd.MultiIndex.from_product([df.columns, [direction]])
return df
def parse_imbalance_prices_zip(zip_contents: bytes) -> pd.DataFrame:
"""
Parameters
----------
zip_contents : bytes
Returns
-------
pd.DataFrame
"""
def gen_frames(archive):
with zipfile.ZipFile(BytesIO(archive), 'r') as arc:
for f in arc.infolist():
if f.filename.endswith('xml'):
frame = parse_imbalance_prices(xml_text=arc.read(f))
yield frame
frames = gen_frames(zip_contents)
df = pd.concat(frames)
df.sort_index(inplace=True)
return df
def _parse_imbalance_prices_timeseries(soup) -> pd.DataFrame:
"""
Parameters
----------
soup : bs4.element.tag
Returns
-------
pd.DataFrame
"""
positions = []
amounts = []
categories = []
for point in soup.find_all('point'):
positions.append(int(point.find('position').text))
amounts.append(float(point.find('imbalance_price.amount').text))
if point.find('imbalance_price.category'):
categories.append(point.find('imbalance_price.category').text)
else:
categories.append('None')
df = pd.DataFrame(data={'position': positions,
'amount': amounts, 'category': categories})
df = df.set_index(['position', 'category']).unstack()
df.sort_index(inplace=True)
df.index = _parse_datetimeindex(soup)
df = df.xs('amount', axis=1)
df.index.name = None
df.columns.name = None
df.rename(columns={'A04': 'Long', 'A05': 'Short',
'None': 'Price for Consumption'}, inplace=True)
return df
def parse_imbalance_volumes_zip(zip_contents: bytes) -> pd.DataFrame:
"""
Parameters
----------
zip_contents : bytes
Returns
-------
pd.DataFrame
"""
def gen_frames(archive):
with zipfile.ZipFile(BytesIO(archive), 'r') as arc:
for f in arc.infolist():
if f.filename.endswith('xml'):
frame = parse_imbalance_volumes(xml_text=arc.read(f))
yield frame
frames = gen_frames(zip_contents)
df = pd.concat(frames)
df.sort_index(inplace=True)
return df
def _parse_imbalance_volumes_timeseries(soup) -> pd.DataFrame:
"""
Parameters
----------
soup : bs4.element.tag
Returns
-------
pd.DataFrame
"""
flow_direction = soup.find('flowdirection.direction')
if flow_direction:
# time series uses flow direction codes
flow_direction_factor = {
'A01': 1, # in
'A02': -1 # out
}[flow_direction.text]
else:
# time series uses positive and negative values
flow_direction_factor = 1
df = pd.DataFrame(columns=['Imbalance Volume'])
for period in soup.find_all('period'):
start = pd.to_datetime(period.find('timeinterval').find('start').text)
end = pd.to_datetime(period.find('timeinterval').find('end').text)
resolution = _resolution_to_timedelta(period.find('resolution').text)
tx = pd.date_range(start=start, end=end, freq=resolution, inclusive='left')
points = period.find_all('point')
for dt, point in zip(tx, points):
df.loc[dt, 'Imbalance Volume'] = \
float(point.find('quantity').text) * flow_direction_factor
df.set_index(['Imbalance Volume'])
return df
def _parse_netposition_timeseries(soup):
"""
Parameters
----------
soup : bs4.element.tag
Returns
-------
pd.Series
"""
positions = []
quantities = []
if 'REGION' in soup.find('out_domain.mrid').text:
factor = -1 # flow is import so negative
else:
factor = 1
for point in soup.find_all('point'):
positions.append(int(point.find('position').text))
quantities.append(factor * float(point.find('quantity').text))
series = pd.Series(index=positions, data=quantities)
series = series.sort_index()
series.index = _parse_datetimeindex(soup)
return series
def _parse_price_timeseries(soup):
"""
Parameters
----------
soup : bs4.element.tag
Returns
-------
pd.Series
"""
positions = []
prices = []
for point in soup.find_all('point'):
positions.append(int(point.find('position').text))
prices.append(float(point.find('price.amount').text))
series = pd.Series(index=positions, data=prices)
series = series.sort_index()
series.index = _parse_datetimeindex(soup)
return series
def _parse_load_timeseries(soup):
"""
Parameters
----------
soup : bs4.element.tag
Returns
-------
pd.Series
"""
positions = []
prices = []
for point in soup.find_all('point'):
positions.append(int(point.find('position').text))
prices.append(float(point.find('quantity').text))
series = pd.Series(index=positions, data=prices)
series = series.sort_index()
series.index = _parse_datetimeindex(soup)
return series
def _parse_generation_timeseries(soup, per_plant: bool = False, include_eic: bool = False) -> pd.Series:
"""
Works for generation by type, generation forecast, and wind and solar
forecast
Parameters
----------
soup : bs4.element.tag
Returns
-------
pd.Series
"""
positions = []
quantities = []
for point in soup.find_all('point'):
positions.append(int(point.find('position').text))
quantity = point.find('quantity')
if quantity is None:
raise LookupError(
f'No quantity found in this point, it should have one: {point}')
quantities.append(float(quantity.text))
series = pd.Series(index=positions, data=quantities)
series = series.sort_index()
series.index = _parse_datetimeindex(soup)
# Check if there is a psrtype, if so, get it.
_psrtype = soup.find('psrtype')
if _psrtype is not None:
psrtype = _psrtype.text
else:
psrtype = None
# Check if the Direction is IN or OUT
# If IN, this means Actual Consumption is measured
# If OUT, this means Consumption is measured.
# OUT means Consumption of a generation plant, eg. charging a pumped hydro plant
if soup.find(CONSUMPTION_ELEMENT.lower()):
metric = 'Actual Consumption'
else:
metric = 'Actual Aggregated'
name = [metric]
# Set both psrtype and metric as names of the series
if psrtype:
psrtype_name = PSRTYPE_MAPPINGS[psrtype]
name.append(psrtype_name)
if per_plant:
plantname = soup.find('name').text
name.append(plantname)
if include_eic:
eic = soup.find("mrid", codingscheme="A01").text
name.insert(0, eic)
if len(name) == 1:
series.name = name[0]
else:
# We give the series multiple names in a tuple
# This will result in a multi-index upon concatenation
name.reverse()
series.name = tuple(name)
return series
def _parse_water_hydro_timeseries(soup, tz):
"""
Parses timeseries for water reservoirs and hydro storage plants
Parameters
----------
soup : bs4.element.tag
Returns
-------
pd.Series
"""
positions = []
quantities = []
for point in soup.find_all('point'):
positions.append(int(point.find('position').text))
quantity = point.find('quantity')
if quantity is None:
raise LookupError(
f'No quantity found in this point, it should have one: {point}')
quantities.append(float(quantity.text))
series = pd.Series(index=positions, data=quantities)
series = series.sort_index()
series.index = _parse_datetimeindex(soup, tz)
return series
def _parse_installed_capacity_per_plant(soup):
"""
Parameters
----------
soup : bs4.element.tag
Returns
-------
pd.Series
"""
extract_vals = {'Name': 'registeredresource.name',
'Production Type': 'psrtype',
'Bidding Zone': 'inbiddingzone_domain.mrid',
# 'Status': 'businesstype',
'Voltage Connection Level [kV]':
'voltage_powersystemresources.highvoltagelimit'}
series = pd.Series(extract_vals).apply(lambda v: soup.find(v).text)
# extract only first point
series['Installed Capacity [MW]'] = \
soup.find_all('point')[0].find('quantity').text
series.name = soup.find('registeredresource.mrid').text
return series
def _parse_datetimeindex(soup, tz=None):
"""
Create a datetimeindex from a parsed beautifulsoup,
given that it contains the elements 'start', 'end'
and 'resolution'
Parameters
----------
soup : bs4.element.tag
tz: str
Returns
-------
pd.DatetimeIndex
"""
start = pd.Timestamp(soup.find('start').text)
end = pd.Timestamp(soup.find_all('end')[-1].text)
if tz is not None:
start = start.tz_convert(tz)
end = end.tz_convert(tz)
delta = _resolution_to_timedelta(res_text=soup.find('resolution').text)
index = pd.date_range(start=start, end=end, freq=delta, inclusive='left')
if tz is not None:
dst_jump = len(set(index.map(lambda d: d.dst()))) > 1
if dst_jump and delta == "7D":
# For a weekly granularity, if we jump over the DST date in October,
# date_range erronously returns an additional index element
# because that week contains 169 hours instead of 168.
index = index[:-1]
index = index.tz_convert("UTC")
return index
def _parse_crossborder_flows_timeseries(soup):
"""
Parameters
----------
soup : bs4.element.tag
Returns
-------
pd.Series
"""
positions = []
flows = []
for point in soup.find_all('point'):
positions.append(int(point.find('position').text))
flows.append(float(point.find('quantity').text))
series = pd.Series(index=positions, data=flows)
series = series.sort_index()
try:
series.index = _parse_datetimeindex(soup)
except ValueError as ex:
if "Length mismatch" in str(ex):
series.index = _parse_datetimeindex(soup)[:-1]
return series
def _resolution_to_timedelta(res_text: str) -> str:
"""
Convert an Entsoe resolution to something that pandas can understand
"""
resolutions = {
'PT60M': '60min',
'P1Y': '12M',
'PT15M': '15min',
'PT30M': '30min',
'P1D': '1D',
'P7D': '7D',
'P1M': '1M',
}
delta = resolutions.get(res_text)
if delta is None:
raise NotImplementedError("Sorry, I don't know what to do with the "
"resolution '{}', because there was no "
"documentation to be found of this format. "
"Everything is hard coded. Please open an "
"issue.".format(res_text))
return delta
# Define inverse bidding zone dico to look up bidding zone labels from the
# domain code in the unavailibility parsers: | _INV_BIDDING_ZONE_DICO = {area.code: area.name for area in Area} | 3 | 2023-11-17 09:23:38+00:00 | 8k |
PlaxtonFlarion/NexaFlow | nexaflow/classifier/base.py | [
{
"identifier": "toolbox",
"path": "nexaflow/toolbox.py",
"snippet": "def video_capture(video_path: str):\ndef video_jump(video_cap: cv2.VideoCapture, frame_id: int):\ndef compare_ssim(pic1: np.ndarray, pic2: np.ndarray) -> float:\ndef multi_compare_ssim(\n pic1_list: typing.List, pic2_list: typing.List, hooks: typing.List = None\n) -> typing.List[float]:\ndef get_current_frame_id(video_cap: cv2.VideoCapture) -> int:\ndef get_current_frame_time(video_cap: cv2.VideoCapture) -> float:\ndef imread(img_path: str, *_, **__) -> np.ndarray:\ndef get_frame_time(\n video_cap: cv2.VideoCapture, frame_id: int, recover: bool = None\n) -> float:\ndef get_frame_count(video_cap: cv2.VideoCapture) -> int:\ndef get_frame_size(video_cap: cv2.VideoCapture) -> typing.Tuple[int, int]:\ndef get_frame(\n video_cap: cv2.VideoCapture, frame_id: int, recover: bool = None\n) -> np.ndarray:\ndef turn_grey(old: np.ndarray) -> np.ndarray:\ndef turn_binary(old: np.ndarray) -> np.ndarray:\ndef turn_hog_desc(old: np.ndarray) -> np.ndarray:\ndef turn_lbp_desc(old: np.ndarray, radius: int = None) -> np.ndarray:\ndef turn_blur(old: np.ndarray) -> np.ndarray:\ndef sharpen_frame(old: np.ndarray) -> np.ndarray:\ndef calc_mse(pic1: np.ndarray, pic2: np.ndarray) -> float:\ndef calc_psnr(pic1: np.ndarray, pic2: np.ndarray) -> float:\ndef compress_frame(\n old: np.ndarray,\n compress_rate: float = None,\n target_size: typing.Tuple[int, int] = None,\n not_grey: bool = None,\n interpolation: int = None,\n *_,\n **__,\n) -> np.ndarray:\ndef get_timestamp_str() -> str:\ndef np2b64str(frame: np.ndarray) -> str:\ndef fps_convert(\n target_fps: int, source_path: str, target_path: str, ffmpeg_exe: str = None\n) -> int:\ndef match_template_with_object(\n template: np.ndarray,\n target: np.ndarray,\n engine_template_cv_method_name: str = None,\n **kwargs,\n) -> typing.Dict[str, typing.Any]:\ndef match_template_with_path(\n template: str, target: np.ndarray, **kwargs\n) -> typing.Dict[str, typing.Any]:\ndef show_progress(total: int, color: int, title: str) -> tqdm:\ndef draw_line(image_path: str, save_path: str = None):"
},
{
"identifier": "constants",
"path": "nexaflow/constants.py",
"snippet": "CHARSET = r\"utf-8\"\nCUT_RESULT_FILE_NAME = r\"cut_result.json\"\nREPORT_FILE_NAME = r\"report.html\"\nBACKGROUND_COLOR = r\"#fffaf4\"\nUNSTABLE_FLAG = r\"-1\"\nUNKNOWN_STAGE_FLAG = r\"-2\"\nIGNORE_FLAG = r\"-3\"\nDEFAULT_THRESHOLD = 0.98\n NEXA = os.path.dirname(os.path.abspath(__file__))\n WORK = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n FORMAT: str = \"| <level>{level: <8}</level> | <level>{message}</level>\"\nclass Constants(object):\n def initial_logger(cls, log_level: str = \"INFO\"):"
},
{
"identifier": "VideoFrame",
"path": "nexaflow/video.py",
"snippet": "class VideoFrame(Frame):\n\n def __init__(self, frame_id: int, timestamp: float, data: np.ndarray):\n super().__init__(frame_id, timestamp, data)\n\n def __str__(self):\n return f\"<VideoFrame id={self.frame_id} timestamp={self.timestamp}>\"\n\n @staticmethod\n def initial(cap: cv2.VideoCapture, frame: np.ndarray) -> \"VideoFrame\":\n frame_id = toolbox.get_current_frame_id(cap)\n timestamp = toolbox.get_current_frame_time(cap)\n new_frame = toolbox.compress_frame(frame, 0.5, (350, 700), False)\n return VideoFrame(frame_id, timestamp, new_frame)\n\n def copy(self) -> \"VideoFrame\":\n return VideoFrame(self.frame_id, self.timestamp, self.data[:])\n\n def contain_image(\n self, *, image_path: str = None, image_object: np.ndarray = None, **kwargs\n ) -> typing.Dict[str, typing.Any]:\n \"\"\"\n 检查给定图像(通过路径或numpy对象)是否存在于当前帧中,并返回匹配的字典\n \"\"\"\n assert image_path or (\n image_object is not None\n ), \"should fill image_path or image_object\"\n\n if image_path:\n logger.debug(f\"found image path, use it first: {image_path}\")\n return toolbox.match_template_with_path(image_path, self.data, **kwargs)\n image_object = toolbox.turn_grey(image_object)\n return toolbox.match_template_with_object(image_object, self.data, **kwargs)"
},
{
"identifier": "VideoObject",
"path": "nexaflow/video.py",
"snippet": "class VideoObject(object):\n\n def __init__(\n self,\n path: typing.Union[str, os.PathLike],\n fps: int = None,\n ):\n \"\"\"\n 初始化,检查文件路径是否有效,执行其他一些初始化操作\n \"\"\"\n assert os.path.isfile(path), f\"video {path} not existed\"\n self.path: str = str(path)\n self.grey_data: typing.Optional[typing.Tuple[\"VideoFrame\"]] = tuple() # 灰度帧\n self.hued_data: typing.Optional[typing.Tuple[\"ColorFrame\"]] = tuple() # 彩色帧\n\n if fps:\n video_path = os.path.join(tempfile.mkdtemp(), f\"tmp_{fps}.mp4\")\n logger.debug(f\"convert video, and bind path to {video_path}\")\n logger.info(f\"转换视频: {video_path}\")\n toolbox.fps_convert(\n fps, self.path, video_path, imageio_ffmpeg.get_ffmpeg_exe()\n )\n self.path = video_path\n\n with toolbox.video_capture(self.path) as cap:\n self.frame_count = toolbox.get_frame_count(cap)\n self.frame_size = toolbox.get_frame_size(cap)\n\n logger.info(f\"视频已生成,视频帧长度: {self.frame_count} 分辨率: {self.frame_size}\")\n\n def __str__(self):\n return f\"<VideoObject path={self.path}>\"\n\n __repr__ = __str__\n\n def sync_timestamp(self, frame_data: tuple[VideoFrame]) -> None:\n assert frame_data, \"load_frames() first\"\n vid = mpy.VideoFileClip(self.path)\n\n vid_count = vid.reader.nframes\n pbar = toolbox.show_progress(vid_count, 153, \"Synzer\")\n for frame_id, (timestamp, _) in enumerate(vid.iter_frames(with_times=True)):\n if frame_id >= len(frame_data):\n break\n # frame_id_real = frame_id + 1\n if not frame_data[frame_id].timestamp:\n # logger.debug(f\"fix frame {frame_id_real}'s timestamp: {timestamp}\")\n frame_data[frame_id].timestamp = timestamp\n pbar.update(1)\n pbar.close()\n\n def sync_backstage(self, frame_data: tuple[ColorFrame]) -> None:\n assert frame_data, \"load_frames() first\"\n vid = mpy.VideoFileClip(self.path)\n\n for frame_id, (timestamp, _) in enumerate(vid.iter_frames(with_times=True)):\n if frame_id >= len(frame_data):\n break\n # frame_id_real = frame_id + 1\n if not frame_data[frame_id].timestamp:\n # logger.debug(f\"fix frame {frame_id_real}'s timestamp: {timestamp}\")\n frame_data[frame_id].timestamp = timestamp\n\n def clean_frames(self):\n \"\"\"\n 清除所有帧数据\n \"\"\"\n self.grey_data = tuple()\n self.hued_data = tuple()\n\n @staticmethod\n def frame_details(frame_type):\n each_cost = frame_type[0].data.nbytes / (1024 ** 2)\n total_cost = each_cost * len(frame_type)\n frame_size = frame_type[0].data.shape[::-1]\n return f\"{frame_type[0].__class__.__name__}: [{each_cost:.2f} MB] [{total_cost:.2f} MB] {frame_size}\"\n\n def load_frames(self, color: bool = False):\n \"\"\"\n 从文件中加载所有帧到内存\n \"\"\"\n logger.info(f\"加载视频帧到内存: {os.path.basename(self.path)}\")\n\n def load_stream(frames: type[VideoFrame]):\n pbar = toolbox.show_progress(self.frame_count, 180, \"Loader\")\n data: list[VideoFrame] = []\n with toolbox.video_capture(self.path) as cap:\n for success, frame in iter(lambda: cap.read(), (False, None)):\n if success:\n data.append(frames.initial(cap, frame))\n pbar.update(1)\n pbar.close()\n return data\n\n def back_ground(frames: type[ColorFrame]):\n data: list[ColorFrame] = []\n with toolbox.video_capture(self.path) as cap:\n for success, frame in iter(lambda: cap.read(), (False, None)):\n if success:\n data.append(frames.initial(cap, frame))\n return data\n\n def load_stream_sync(brand):\n self.sync_timestamp(tuple(frame_data := load_stream(brand)))\n return frame_data\n\n def back_ground_sync(brand):\n self.sync_backstage(tuple(frame_data := back_ground(brand)))\n return frame_data\n\n start_time, task, hued = time.time(), None, None\n if color:\n task = ThreadPoolExecutor()\n hued = task.submit(back_ground_sync, ColorFrame)\n\n grey = load_stream_sync(VideoFrame)\n self.grey_data = tuple(grey)\n logger.info(f\"灰度帧已加载: {self.frame_details(self.grey_data)}\")\n logger.info(f\"视频加载耗时: {time.time() - start_time:.2f} 秒\")\n return task, hued\n\n def _read_from_file(self) -> typing.Generator[\"VideoFrame\", None, None]:\n \"\"\"\n 从文件中读取帧\n \"\"\"\n with toolbox.video_capture(self.path) as cap:\n success, frame = cap.read()\n while success:\n yield VideoFrame.initial(cap, frame)\n success, frame = cap.read()\n\n def _read_from_mem(self) -> typing.Generator[\"VideoFrame\", None, None]:\n \"\"\"\n 从内存中读取帧\n \"\"\"\n for each_frame in self.grey_data:\n yield each_frame\n\n def _read(self) -> typing.Generator[\"VideoFrame\", None, None]:\n \"\"\"\n 选择从文件还是从内存中读取帧\n \"\"\"\n if self.grey_data:\n yield from self._read_from_mem()\n else:\n yield from self._read_from_file()\n\n def get_iterator(self) -> typing.Generator[\"VideoFrame\", None, None]:\n \"\"\"\n 获取帧的迭代器\n \"\"\"\n return self._read()\n\n def get_operator(self) -> _BaseFrameOperator:\n \"\"\"\n 根据是否已经加载帧,返回相应的FrameOperator(`MemFrameOperator`或`FileFrameOperator`)\n \"\"\"\n if self.grey_data:\n return MemFrameOperator(self)\n return FileFrameOperator(self)\n\n def __iter__(self):\n \"\"\"\n 返回一个用于迭代帧的迭代器\n \"\"\"\n return self.get_iterator()"
},
{
"identifier": "VideoCutRange",
"path": "nexaflow/cutter/cut_range.py",
"snippet": "class VideoCutRange(object):\n\n def __init__(\n self,\n video: typing.Union[VideoObject, typing.Dict],\n start: int,\n end: int,\n ssim: typing.List[float],\n mse: typing.List[float],\n psnr: typing.List[float],\n start_time: float,\n end_time: float,\n ):\n if isinstance(video, dict):\n self.video = VideoObject(**video)\n else:\n self.video = video\n\n self.start = start\n self.end = end\n self.ssim = ssim\n self.mse = mse\n self.psnr = psnr\n self.start_time = start_time\n self.end_time = end_time\n\n if start > end:\n self.start, self.end = self.end, self.start\n self.start_time, self.end_time = self.end_time, self.start_time\n\n # logger.debug(\n # f\"new a range: {self.start}({self.start_time}) - {self.end}({self.end_time})\"\n # )\n\n def can_merge(self, another: \"VideoCutRange\", offset: int = None, **_):\n if not offset:\n is_continuous = self.end == another.start\n else:\n is_continuous = self.end + offset >= another.start\n return is_continuous and self.video.path == another.video.path\n\n def merge(self, another: \"VideoCutRange\", **kwargs) -> \"VideoCutRange\":\n assert self.can_merge(another, **kwargs)\n return __class__(\n self.video,\n self.start,\n another.end,\n self.ssim + another.ssim,\n self.mse + another.mse,\n self.psnr + another.psnr,\n self.start_time,\n another.end_time,\n )\n\n def contain(self, frame_id: int) -> bool:\n return frame_id in range(self.start, self.end + 1)\n\n contain_frame_id = contain\n\n def contain_image(\n self, image_path: str = None, image_object: np.ndarray = None, *args, **kwargs\n ) -> typing.Dict[str, typing.Any]:\n target_id = self.pick(*args, **kwargs)[0]\n operator = self.video.get_operator()\n frame = operator.get_frame_by_id(target_id)\n return frame.contain_image(\n image_path=image_path, image_object=image_object, **kwargs\n )\n\n def pick(\n self, frame_count: int = None, is_random: bool = None, *_, **__\n ) -> typing.List[int]:\n if not frame_count:\n frame_count = 3\n logger.debug(\n f\"pick {frame_count} frames \"\n f\"from {self.start}({self.start_time}) \"\n f\"to {self.end}({self.end_time}) \"\n f\"on video {self.video.path}\"\n )\n\n result = list()\n if is_random:\n return random.sample(range(self.start, self.end), frame_count)\n length = self.get_length()\n\n frame_count += 1\n for _ in range(1, frame_count):\n cur = int(self.start + length / frame_count * _)\n result.append(cur)\n return result\n\n def get_frames(\n self, frame_id_list: typing.List[int], *_, **__\n ) -> typing.List[VideoFrame]:\n\n out = list()\n operator = self.video.get_operator()\n for each_id in frame_id_list:\n frame = operator.get_frame_by_id(each_id)\n out.append(frame)\n return out\n\n def pick_and_get(self, *args, **kwargs) -> typing.List[VideoFrame]:\n picked = self.pick(*args, **kwargs)\n return self.get_frames(picked, *args, **kwargs)\n\n def get_length(self):\n return self.end - self.start + 1\n\n def is_stable(\n self, threshold: float = None, psnr_threshold: float = None, **_\n ) -> bool:\n\n if not threshold:\n threshold = constants.DEFAULT_THRESHOLD\n\n res = np.mean(self.ssim) > threshold\n if res and psnr_threshold:\n res = np.mean(self.psnr) > psnr_threshold\n\n return res\n\n def is_loop(self, threshold: float = None, **_) -> bool:\n if not threshold:\n threshold = constants.DEFAULT_THRESHOLD\n operator = self.video.get_operator()\n start_frame = operator.get_frame_by_id(self.start)\n end_frame = operator.get_frame_by_id(self.end)\n return toolbox.compare_ssim(start_frame.data, end_frame.data) > threshold\n\n def diff(\n self,\n another: \"VideoCutRange\",\n pre_hooks: typing.List[BaseHook],\n *args,\n **kwargs,\n ) -> typing.List[float]:\n self_picked = self.pick_and_get(*args, **kwargs)\n another_picked = another.pick_and_get(*args, **kwargs)\n return toolbox.multi_compare_ssim(self_picked, another_picked, pre_hooks)\n\n def __str__(self):\n return f\"<VideoCutRange [{self.start}({self.start_time})-{self.end}({self.end_time})] ssim={self.ssim}>\"\n\n __repr__ = __str__"
},
{
"identifier": "BaseHook",
"path": "nexaflow/hook.py",
"snippet": "class BaseHook(object):\n\n def __init__(self, *_, **__):\n # logger.debug(f\"start initialing: {self.__class__.__name__} ...\")\n logger.info(f\"加载视频帧处理单元: Frame Processor {self.__class__.__name__} ...\")\n self.result = dict()\n\n def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:\n # info = f\"execute hook: {self.__class__.__name__}\"\n\n frame_id = frame.frame_id\n if frame_id != -1:\n # logger.debug(f\"{info}, frame id: {frame_id}\")\n pass\n return frame"
},
{
"identifier": "CompressHook",
"path": "nexaflow/hook.py",
"snippet": "class CompressHook(BaseHook):\n\n def __init__(\n self,\n compress_rate: float = None,\n target_size: typing.Tuple[int, int] = None,\n *_,\n **__,\n ):\n super().__init__(*_, **__)\n self.compress_rate = compress_rate\n self.target_size = target_size\n # logger.debug(f\"compress rate: {compress_rate}\")\n # logger.debug(f\"target size: {target_size}\")\n\n def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:\n super().do(frame, *_, **__)\n frame.data = toolbox.compress_frame(\n frame.data, compress_rate=self.compress_rate, target_size=self.target_size\n )\n return frame"
},
{
"identifier": "GreyHook",
"path": "nexaflow/hook.py",
"snippet": "class GreyHook(BaseHook):\n\n def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:\n super().do(frame, *_, **__)\n frame.data = toolbox.turn_grey(frame.data)\n return frame"
}
] | import os
import cv2
import json
import time
import typing
import pathlib
import difflib
import numpy as np
from loguru import logger
from collections import OrderedDict
from nexaflow import toolbox, constants
from nexaflow.video import VideoFrame, VideoObject
from nexaflow.cutter.cut_range import VideoCutRange
from nexaflow.hook import BaseHook, CompressHook, GreyHook | 4,809 |
class SingleClassifierResult(object):
def __init__(
self,
video_path: str,
frame_id: int,
timestamp: float,
stage: str,
data: np.ndarray = None,
):
self.video_path: str = video_path
self.frame_id: int = frame_id
self.timestamp: float = timestamp
self.stage: str = stage
self.data: np.ndarray = data
def to_video_frame(self, *args, **kwargs) -> VideoFrame:
if self.data is not None:
return VideoFrame(self.frame_id, self.timestamp, self.data)
with toolbox.video_capture(self.video_path) as cap:
frame = toolbox.get_frame(cap, self.frame_id)
compressed = toolbox.compress_frame(frame, *args, **kwargs)
return VideoFrame(self.frame_id, self.timestamp, compressed)
def get_data(self) -> np.ndarray:
return self.to_video_frame().data
def is_stable(self) -> bool:
return self.stage not in (
|
class SingleClassifierResult(object):
def __init__(
self,
video_path: str,
frame_id: int,
timestamp: float,
stage: str,
data: np.ndarray = None,
):
self.video_path: str = video_path
self.frame_id: int = frame_id
self.timestamp: float = timestamp
self.stage: str = stage
self.data: np.ndarray = data
def to_video_frame(self, *args, **kwargs) -> VideoFrame:
if self.data is not None:
return VideoFrame(self.frame_id, self.timestamp, self.data)
with toolbox.video_capture(self.video_path) as cap:
frame = toolbox.get_frame(cap, self.frame_id)
compressed = toolbox.compress_frame(frame, *args, **kwargs)
return VideoFrame(self.frame_id, self.timestamp, compressed)
def get_data(self) -> np.ndarray:
return self.to_video_frame().data
def is_stable(self) -> bool:
return self.stage not in ( | constants.UNSTABLE_FLAG, | 1 | 2023-11-13 05:27:34+00:00 | 8k |
OpenBMB/XAgent | XAgent/recorder.py | [
{
"identifier": "AutoGPTQuery",
"path": "XAgent/workflow/base_query.py",
"snippet": "class AutoGPTQuery(BaseQuery):\n \"\"\"\n A specific type of query that inherits from the BaseQuery class.\n Used for specific GPT model actions.\n \"\"\"\n\n def __init__(self,**args):\n \"\"\"\n Constructs all the necessary attributes for the AutoGPTQuery object by inheriting from BaseQuery class.\n\n Args:\n **args: Variable length argument list which is a dictionary of attribute key-value pairs.\n \"\"\"\n super().__init__(**args)\n\n def log_self(self):\n \"\"\"\n Logs AutoGPTQuery details using logger.\n\n This method logs \"Role\", \"Task\" with role_name and task respectively.\n If there is any detail in the plan, it also logs \"Plan\" with each detail in the plan.\n \"\"\"\n logger.typewriter_log(\"Role\", Fore.YELLOW, self.role_name)\n logger.typewriter_log(\"Task\", Fore.YELLOW, self.task)\n if self.plan != []:\n logger.typewriter_log(\"Plan\", Fore.YELLOW)\n for k, plan in enumerate(self.plan):\n logger.typewriter_log(f\" {k+1}.{plan}\", Style.RESET_ALL)"
},
{
"identifier": "XAgentConfig",
"path": "XAgent/config.py",
"snippet": "class XAgentConfig(dict):\n \"\"\"\n A dictionary-like configuration class with attribute-style access.\n\n Inherited from dictionary, this class provides methods for accessing and modifying\n dictionary items using attributes and methods.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Initialize class instance.\n\n Args:\n *args: Variable length argument list.\n **kwargs: Arbitrary keyword arguments.\n \"\"\"\n super().__init__(*args, **kwargs)\n\n def __getattr__(self, key):\n \"\"\"\n Access the class attribute.\n\n Args:\n key (str): Key to access the class attribute.\n\n Returns:\n Value of the class attribute for the input key.\n\n Raises:\n AttributeError: If the input key is not present in the dictionary.\n \"\"\"\n if key in self:\n return self[key]\n raise AttributeError(f\"'DotDict' object has no attribute '{key}'\")\n\n def __setattr__(self, key, value):\n \"\"\"\n Set the value of the class attribute.\n\n Args:\n key (str): Key for the attribute to set.\n value : Value to be set for the input key.\n \"\"\"\n self[key] = value\n\n def __delattr__(self, key):\n \"\"\"\n Delete the class attribute.\n\n Args:\n key (str): Key of the attribute to delete.\n\n Raises:\n AttributeError: If the input key is not present in the dictionary.\n \"\"\"\n if key in self:\n del self[key]\n else:\n raise AttributeError(f\"'DotDict' object has no attribute '{key}'\")\n\n def to_dict(self, safe=False):\n \"\"\"\n Convert the xAgentConfig object to dictionary.\n\n Args:\n safe (bool, optional): If True, 'api_keys' will be excluded from the output.\n Default is False.\n\n Returns:\n dict: Dictionary representation of the instance.\n \"\"\"\n if safe:\n right_value = deepcopy(self)\n right_value.pop(\"api_keys\", \"\")\n return right_value\n else:\n return self\n\n def reload(self, config_file='assets/config.yml'):\n \"\"\"\n Load configuration data from YAML file and environment variables. And also update\n the ARGS with new data.\n\n Args:\n config_file (str, optional): Path to the YAML configuration file.\n Default is 'assets/config.yml'.\n \"\"\"\n config_file = os.getenv('CONFIG_FILE', config_file)\n print('---config file---\\n'+str(config_file))\n self.__init__(\n **yaml.load(open(config_file, 'r'), Loader=yaml.FullLoader))\n # check environment variables\n self['selfhost_toolserver_url'] = os.getenv(\n 'TOOLSERVER_URL', self['selfhost_toolserver_url'])\n print('---args---\\n'+str(ARGS))\n self.update(ARGS)\n\n @staticmethod\n def get_default_config(config_file='assets/config.yml'):\n \"\"\"\n Get default configuration data from given file through environment variable.\n\n Args:\n config_file (str, optional): Path to the YAML configuration file.\n Default is 'assets/config.yml'.\n\n Returns:\n XAgentConfig: An instance of XAgentConfig with loaded configuration data.\n \"\"\"\n try:\n config_file = os.getenv('CONFIG_FILE', config_file)\n cfg = yaml.load(open(config_file, 'r'), Loader=yaml.FullLoader)\n except:\n cfg = {}\n return XAgentConfig(**cfg)"
},
{
"identifier": "SessionLocal",
"path": "XAgentServer/database/connect.py",
"snippet": "SQLALCHEMY_DATABASE_URL = os.getenv('MYSQL_DB_URL', XAgentServerEnv.DB.db_url)"
},
{
"identifier": "Logger",
"path": "XAgentServer/loggers/logs.py",
"snippet": "class Logger(metaclass=abc.ABCMeta):\n \"\"\"\n Logger that handle titles in different colors.\n Outputs logs in console, activity.log, and errors.log\n For console handler: simulates typing\n \"\"\"\n\n def __init__(self, log_dir: str = None, log_name: str= \"\", log_file: str = \"activity.log\", error_file: str = \"errors.log\"):\n \"\"\"init\"\"\"\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n # create log directory if it doesn't exist\n self.log_name = time.strftime(\"%Y-%m-%d\", time.localtime()) if not log_name else log_name\n self.logger = logging.getLogger(self.log_name)\n console_formatter = RecordFormatter(\"%(title_color)s %(message)s\")\n\n # Create a handler for console which simulate typing\n self.typing_console_handler = TypingConsoleHandler()\n self.typing_console_handler.setLevel(logging.INFO)\n self.typing_console_handler.setFormatter(console_formatter)\n\n # Create a handler for console without typing simulation\n self.console_handler = ConsoleHandler()\n self.console_handler.setLevel(logging.DEBUG)\n self.console_handler.setFormatter(console_formatter)\n\n self.speak_mode = False\n self.chat_plugins = []\n\n # Info handler in activity.log\n self.file_handler = logging.FileHandler(\n os.path.join(log_dir, log_file), \"a\", \"utf-8\"\n )\n self.file_handler.setLevel(logging.DEBUG)\n info_formatter = RecordFormatter(\n \"%(asctime)s [%(threadName)s] %(levelname)s: %(title_color)s %(title)s %(message)s\"\n )\n self.file_handler.setFormatter(info_formatter)\n\n # Error handler error.log\n error_handler = logging.FileHandler(\n os.path.join(log_dir, error_file), \"a\", \"utf-8\"\n )\n error_handler.setLevel(logging.ERROR)\n error_formatter = RecordFormatter(\n \"%(asctime)s [%(threadName)s] %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title_color)s %(title)s\"\n \" %(message_no_color)s\"\n )\n error_handler.setFormatter(error_formatter)\n\n # self.typing_logger = logging.getLogger(self.log_name)\n # if not self.typing_logger.handlers:\n # self.typing_logger.addHandler(self.typing_console_handler)\n # self.typing_logger.addHandler(self.file_handler)\n # self.typing_logger.addHandler(error_handler)\n # self.typing_logger.setLevel(logging.DEBUG)\n\n if self.log_name.endswith(\"_INTERACT\") or not self.logger.handlers:\n # self.logger.addHandler(self.typing_console_handler)\n self.logger.addHandler(self.console_handler)\n self.logger.addHandler(error_handler)\n self.logger.addHandler(self.file_handler)\n self.logger.setLevel(logging.DEBUG)\n \n def typewriter_log(\n self, title=\"\", title_color=\"\", content=\"\", speak_text=False, level=logging.INFO\n ):\n # if speak_text and self.speak_mode:\n # say_text(f\"{title}. {content}\")\n\n for plugin in self.chat_plugins:\n plugin.report(f\"{title}. {content}\")\n\n if content:\n if isinstance(content, list):\n content = \" \".join(content)\n else:\n content = \"\"\n\n self.logger.log(\n level, content, extra={\"title\": title, \"color\": title_color}\n )\n\n def debug(\n self,\n message,\n title=\"\",\n title_color=\"\",\n ):\n self._log(title, title_color, message, logging.DEBUG)\n\n def info(\n self,\n message,\n title=\"\",\n title_color=\"\",\n ):\n self._log(title, title_color, message, logging.INFO)\n\n def warn(\n self,\n message,\n title=\"\",\n title_color=\"\",\n ):\n self._log(title, title_color, message, logging.WARN)\n\n def error(self, title, message=\"\"):\n self._log(title, Fore.RED, message, logging.ERROR)\n\n def _log(\n self,\n title: str = \"\",\n title_color: str = \"\",\n message: str = \"\",\n level=logging.INFO,\n ):\n if message:\n if isinstance(message, list):\n message = \" \".join(message)\n self.logger.log(\n level, message, extra={\"title\": str(title), \"color\": str(title_color)}\n )\n\n def set_level(self, level):\n self.logger.setLevel(level)\n self.typing_logger.setLevel(level)\n\n def double_check(self, additionalText=None):\n if not additionalText:\n additionalText = (\n \"Please ensure you've setup and configured everything\"\n \" correctly. Read https://github.com/Torantulino/Auto-GPT#readme to \"\n \"double check. You can also create a github issue or join the discord\"\n \" and ask there!\"\n )\n\n self.typewriter_log(\"DOUBLE CHECK CONFIGURATION\", Fore.YELLOW, additionalText)\n\n def log_json(self, data: Any, file_name: str) -> None:\n # Define log directory\n this_files_dir_path = os.path.dirname(__file__)\n log_dir = os.path.join(this_files_dir_path, \"../logs\")\n\n # Create a handler for JSON files\n json_file_path = os.path.join(log_dir, file_name)\n json_data_handler = JsonFileHandler(json_file_path)\n json_data_handler.setFormatter(JsonFormatter())\n\n # Log the JSON data using the custom file handler\n self.json_logger.addHandler(json_data_handler)\n self.json_logger.debug(data)\n self.json_logger.removeHandler(json_data_handler)\n\n def get_log_directory(self):\n this_files_dir_path = os.path.dirname(__file__)\n log_dir = os.path.join(this_files_dir_path, \"../logs\")\n return os.path.abspath(log_dir)"
},
{
"identifier": "XAgentRunningRecord",
"path": "XAgentServer/models/recorder.py",
"snippet": "class XAgentRunningRecord(metaclass=abc.ABCMeta):\n \"\"\"XAgent Running Recorder\"\"\"\n def __init__(self,\n record_id: str,\n current: str,\n node_id: str,\n node_type: str,\n data: dict,\n create_time: str,\n update_time: str,\n is_deleted: bool,\n ):\n self.record_id = record_id\n self.current = current\n self.node_id = node_id\n self.node_type = node_type\n self.data = data\n self.create_time = create_time\n self.update_time = update_time\n self.is_deleted = is_deleted\n\n def to_dict(self):\n \"\"\"XAgent Running Recorder to dict\"\"\"\n return {\n \"record_id\": self.record_id,\n \"current\": self.current,\n \"node_id\": self.node_id,\n \"node_type\": self.node_type,\n \"data\": self.data,\n \"create_time\": self.create_time,\n \"update_time\": self.update_time,\n \"is_deleted\": self.is_deleted,\n }\n\n @classmethod\n def from_db(cls, db: RunningRecord):\n \"\"\"From db\"\"\"\n return cls(\n record_id=db.record_id,\n current=db.current,\n node_id=db.node_id,\n node_type=db.node_type,\n data=db.data,\n create_time=db.create_time,\n update_time=db.update_time,\n is_deleted=db.is_deleted,\n )\n\n @classmethod\n def from_dict(cls, data: dict):\n \"\"\"dict to XAgent Running Recorder\"\"\"\n return cls(\n record_id=data[\"record_id\"],\n current=data[\"current\"],\n node_id=data[\"node_id\"],\n node_type=data[\"node_type\"],\n data=data[\"data\"],\n create_time=data[\"create_time\"],\n update_time=data[\"update_time\"],\n is_deleted=data[\"is_deleted\"],\n )"
},
{
"identifier": "RunningRecordCRUD",
"path": "XAgentServer/application/cruds/recorder.py",
"snippet": "class RunningRecordCRUD(metaclass=abc.ABCMeta):\n \"\"\"\n Recorder CRUD\n \"\"\"\n\n @classmethod\n def get_record_list(cls, db: Session, record_id: str) -> list[XAgentRunningRecord]:\n \"\"\"\n get all records\n \n Args:\n db: database session\n record_id: record_id\n \"\"\"\n try:\n return RecordDBInterface.get_record_list(db=db, record_id=record_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Recorder Module]: {str(e)}\") from e\n\n @classmethod\n def get_record(cls,\n db: Session,\n record_id: str | None = None) -> XAgentRunningRecord | None:\n \"\"\"\n get record by record_id\n \n Args:\n db: database session\n record_id: record_id\n \n Returns:\n record\n \n \"\"\"\n try:\n return RecordDBInterface.get_record(db=db, record_id=record_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Recorder Module]: {str(e)}\") from e\n\n @classmethod\n def insert_record(cls,\n db: Session,\n record: XAgentRunningRecord):\n \"\"\"\n insert record\n \n Args:\n db: database session\n record: record\n \n \"\"\"\n try:\n RecordDBInterface.insert_record(db=db, record=record)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Recorder Module]: {str(e)}\") from e\n\n @classmethod\n def get_record_by_type(cls,\n db: Session,\n record_id: str,\n node_id: str = \"\",\n node_type: str = \"\") -> List[XAgentRunningRecord]:\n \"\"\"\n get record by id\n \n Args:\n db: database session\n record_id: record_id\n \n Returns:\n record\n \n \"\"\"\n try:\n return RecordDBInterface.get_record_by_type(db=db,\n record_id=record_id,\n node_id=node_id,\n node_type=node_type)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Recorder Module]: {str(e)}\") from e"
},
{
"identifier": "RecorderTypeEnum",
"path": "XAgentServer/enums/recorder_type.py",
"snippet": "class RecorderTypeEnum:\n \"\"\"XAgent Running Recorder Type Enum\n \"\"\"\n QUERY = \"query\"\n CONFIG = \"config\"\n LLM_INPUT_PAIR = \"llm_input_pair\"\n TOOL_SERVER_PAIR = \"tool_server_pair\"\n NOW_SUBTASK_ID = \"now_subtask_id\"\n TOOL_CALL = \"tool_call\"\n PLAN_REFINE = \"plan_refine\"\n LLM_SERVER_CACHE = \"llm_server_cache\"\n TOOL_SERVER_CACHE = \"tool_server_cache\"\n TOOL_CALL_CACHE = \"tool_call_cache\"\n PLAN_REFINE_CACHE = \"plan_refine_cache\"\n LLM_INTERFACE_ID = \"llm_interface_id\"\n TOOL_SERVER_INTERFACE_ID = \"toolserver_interface_id\"\n TOOL_CALL_ID = \"tool_call_id\""
}
] | from contextlib import contextmanager
from colorama import Fore
from XAgent.workflow.base_query import AutoGPTQuery
from XAgent.config import XAgentConfig
from XAgentServer.database.connect import SessionLocal
from XAgentServer.loggers.logs import Logger
from XAgentServer.models.recorder import XAgentRunningRecord
from XAgentServer.application.cruds.recorder import RunningRecordCRUD
from XAgentServer.enums.recorder_type import RecorderTypeEnum
import datetime
import os
import time
import json
import re | 4,735 | """XAgent Running Recorder Util"""
def dump_common_things(object):
"""common"""
if type(object) in [str, int, float, bool]:
return object
if isinstance(object, dict):
return {dump_common_things(key): dump_common_things(value) for key, value in object.items()}
if isinstance(object, list):
return [dump_common_things(cont) for cont in object]
method = getattr(object, 'to_json', None)
if callable(method):
return method()
@contextmanager
def get_db():
"""
Provide a transactional scope around a series of operations.
"""
session = SessionLocal()
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
class RunningRecoder():
"""A class used to record the running sequences of the program, also including program query status and config data.
"""
def __init__(self, record_id: str, newly_start=True, root_dir=None, logger: Logger=None):
self.record_id = record_id
self.record_root_dir = root_dir
if not os.path.exists(self.record_root_dir):
os.makedirs(self.record_root_dir, exist_ok=True)
self.newly_start = newly_start # 是全新启动的
self.logger = logger
self.query = {}
self.config = {}
self.llm_interface_id = 0
self.toolserver_interface_id = 0
self.tool_call_id = 0
self.plan_refine_id = 0
self.llm_server_cache = []
self.tool_server_cache = []
self.tool_call_cache = []
self.plan_refine_cache = []
self.now_subtask_id = None
def change_now_task(self, new_subtask_id):
"""change now task"""
self.now_subtask_id = new_subtask_id
self.tool_call_id = 0
self.plan_refine_id = 0
def generate_record(self, current, node_id, node_type, data):
"""generate a recorder"""
self.logger.typewriter_log(title="-=-=-=-=-=-=-=Recorder Start-=-=-=-=-=-=-=\n",
title_color=Fore.GREEN,
content=f"Current: {current} Node: {node_type} {node_id}")
json_str = json.dumps(data, ensure_ascii=False, indent=4)
json_str=re.sub(r'"api_key": "(.+?)"', r'"api_key": "**"', json_str)
self.logger.typewriter_log(title="-=-=-=-=-=-=-=Data -=-=-=-=-=-=-=\n",
title_color=Fore.GREEN,
content=json_str)
self.logger.typewriter_log(title="-=-=-=-=-=-=-=Recorder End-=-=-=-=-=-=-=",
title_color=Fore.GREEN,
content="")
return XAgentRunningRecord(
record_id=self.record_id,
current=current,
node_id=node_id,
node_type=node_type,
data=data,
create_time=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
update_time=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
is_deleted=False,
)
def regist_plan_modify(self,
refine_function_name,
refine_function_input,
refine_function_output,
plan_after):
"""注册一个plan_refine的记录"""
plan_refine_record = {
"refine_function_name": dump_common_things(refine_function_name),
"refine_function_input": dump_common_things(refine_function_input),
"refine_function_output": dump_common_things(refine_function_output),
"plan_after": dump_common_things(plan_after),
}
record = self.generate_record(
current=self.now_subtask_id,
node_id=self.plan_refine_id,
node_type=RecorderTypeEnum.PLAN_REFINE,
data=plan_refine_record,
)
with get_db() as db:
| """XAgent Running Recorder Util"""
def dump_common_things(object):
"""common"""
if type(object) in [str, int, float, bool]:
return object
if isinstance(object, dict):
return {dump_common_things(key): dump_common_things(value) for key, value in object.items()}
if isinstance(object, list):
return [dump_common_things(cont) for cont in object]
method = getattr(object, 'to_json', None)
if callable(method):
return method()
@contextmanager
def get_db():
"""
Provide a transactional scope around a series of operations.
"""
session = SessionLocal()
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
class RunningRecoder():
"""A class used to record the running sequences of the program, also including program query status and config data.
"""
def __init__(self, record_id: str, newly_start=True, root_dir=None, logger: Logger=None):
self.record_id = record_id
self.record_root_dir = root_dir
if not os.path.exists(self.record_root_dir):
os.makedirs(self.record_root_dir, exist_ok=True)
self.newly_start = newly_start # 是全新启动的
self.logger = logger
self.query = {}
self.config = {}
self.llm_interface_id = 0
self.toolserver_interface_id = 0
self.tool_call_id = 0
self.plan_refine_id = 0
self.llm_server_cache = []
self.tool_server_cache = []
self.tool_call_cache = []
self.plan_refine_cache = []
self.now_subtask_id = None
def change_now_task(self, new_subtask_id):
"""change now task"""
self.now_subtask_id = new_subtask_id
self.tool_call_id = 0
self.plan_refine_id = 0
def generate_record(self, current, node_id, node_type, data):
"""generate a recorder"""
self.logger.typewriter_log(title="-=-=-=-=-=-=-=Recorder Start-=-=-=-=-=-=-=\n",
title_color=Fore.GREEN,
content=f"Current: {current} Node: {node_type} {node_id}")
json_str = json.dumps(data, ensure_ascii=False, indent=4)
json_str=re.sub(r'"api_key": "(.+?)"', r'"api_key": "**"', json_str)
self.logger.typewriter_log(title="-=-=-=-=-=-=-=Data -=-=-=-=-=-=-=\n",
title_color=Fore.GREEN,
content=json_str)
self.logger.typewriter_log(title="-=-=-=-=-=-=-=Recorder End-=-=-=-=-=-=-=",
title_color=Fore.GREEN,
content="")
return XAgentRunningRecord(
record_id=self.record_id,
current=current,
node_id=node_id,
node_type=node_type,
data=data,
create_time=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
update_time=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
is_deleted=False,
)
def regist_plan_modify(self,
refine_function_name,
refine_function_input,
refine_function_output,
plan_after):
"""注册一个plan_refine的记录"""
plan_refine_record = {
"refine_function_name": dump_common_things(refine_function_name),
"refine_function_input": dump_common_things(refine_function_input),
"refine_function_output": dump_common_things(refine_function_output),
"plan_after": dump_common_things(plan_after),
}
record = self.generate_record(
current=self.now_subtask_id,
node_id=self.plan_refine_id,
node_type=RecorderTypeEnum.PLAN_REFINE,
data=plan_refine_record,
)
with get_db() as db: | RunningRecordCRUD.insert_record(db=db, record=record) | 5 | 2023-10-16 03:44:57+00:00 | 8k |
deepseek-ai/DeepSeek-Coder | Evaluation/HumanEval/human_eval/evaluation.py | [
{
"identifier": "stream_jsonl",
"path": "Evaluation/HumanEval/human_eval/data.py",
"snippet": "def stream_jsonl(filename: str) -> Iterable[Dict]:\n \"\"\"\n Parses each jsonl line and yields it as a dictionary\n \"\"\"\n if filename.endswith(\".gz\"):\n with open(filename, \"rb\") as gzfp:\n with gzip.open(gzfp, 'rt') as fp:\n for line in fp:\n if any(not x.isspace() for x in line):\n yield json.loads(line)\n else:\n with open(filename, \"r\", encoding=\"utf-8\") as fp:\n for line in fp:\n if any(not x.isspace() for x in line):\n yield json.loads(line)"
},
{
"identifier": "check_correctness",
"path": "Evaluation/HumanEval/human_eval/execution.py",
"snippet": "def check_correctness(\n task_id: str,\n sample: dict,\n language_type: str,\n timeout: float = 3.0,\n tmp_dir: str = None,\n completion_id: Optional[int] = None,\n) -> Dict:\n \"\"\"\n Evaluates the functional correctness of a completion by running the test\n suite provided in the problem.\n \"\"\"\n\n def unsafe_execute(tmp_dir):\n random_id = random.randint(1, 100000)\n if \"python\" in language_type.lower():\n with create_tempdir():\n\n # These system calls are needed when cleaning up tempdir.\n import os\n import shutil\n rmtree = shutil.rmtree\n rmdir = os.rmdir\n chdir = os.chdir\n\n # Disable functionalities that can make destructive changes to the test.\n reliability_guard()\n\n try:\n exec_globals = {}\n with swallow_io():\n with time_limit(timeout):\n # WARNING\n # This program exists to execute untrusted model-generated code. Although\n # it is highly unlikely that model-generated code will do something overtly\n # malicious in response to this test suite, model-generated code may act\n # destructively due to a lack of model capability or alignment.\n # Users are strongly encouraged to sandbox this evaluation suite so that it\n # does not perform destructive actions on their host or network.\n # Once you have read this disclaimer and taken appropriate precautions,\n # uncomment the following line and proceed at your own risk:\n exec(sample[\"test_code\"], exec_globals)\n result.append(\"passed\")\n except TimeoutException:\n result.append(\"timed out\")\n except AssertionError as e:\n result.append(f\"failed: AssertionError\")\n except BaseException as e:\n result.append(f\"failed: {e}\")\n #print(sample[\"test_code\"])\n #print(result)\n # Needed for cleaning up.\n shutil.rmtree = rmtree\n os.rmdir = rmdir\n os.chdir = chdir\n\n elif \"go\" in language_type.lower():\n assert tmp_dir is not None, \"Go should be evaluated in a dir where necessary module files installed.\"\n\n import os\n import shutil\n\n if \"tmp\" not in tmp_dir:\n tmp_dir = os.path.join(tmp_dir, \"tmp\")\n tmp_dir = os.path.join(tmp_dir, f\"{task_id.replace('/', '-')}-{random_id}\")\n if not os.path.exists(tmp_dir):\n os.makedirs(tmp_dir)\n origin_path = os.getcwd()\n os.chdir(tmp_dir)\n open(f\"main_test.go\", 'w').write(sample[\"test_code\"])\n try:\n exec_result = None\n with time_limit(timeout):\n # WARNING\n # This program exists to execute untrusted model-generated code. Although\n # it is highly unlikely that model-generated code will do something overtly\n # malicious in response to this test suite, model-generated code may act\n # destructively due to a lack of model capability or alignment.\n # Users are strongly encouraged to sandbox this evaluation suite so that it\n # does not perform destructive actions on their host or network.\n # Once you have read this disclaimer and taken appropriate precautions,\n # uncomment the following line and proceed at your own risk:\n exec_result = subprocess.run([f\"{go_exec}go\", \"test\", f\"-timeout={timeout}s\", \"main_test.go\"], timeout=timeout, capture_output=True)\n\n if exec_result.returncode == 0:\n result.append(\"passed\")\n else:\n if exec_result.stderr:\n try:\n err = exec_result.stderr.decode()\n except:\n err = exec_result.stderr\n else:\n try:\n err = exec_result.stdout.decode()\n except:\n err = exec_result.stdout\n result.append(f\"failed: {err}\")\n\n except TimeoutException:\n result.append(\"timed out\")\n os.chdir(origin_path)\n shutil.rmtree(tmp_dir)\n elif \"js\" in language_type.lower():\n import os\n import shutil\n\n if \"tmp\" not in tmp_dir:\n tmp_dir = os.path.join(tmp_dir, \"tmp\")\n tmp_dir = os.path.join(tmp_dir, f\"{task_id.replace('/', '-')}-{random_id}\")\n if not os.path.exists(tmp_dir):\n os.makedirs(tmp_dir)\n origin_path = os.getcwd()\n os.chdir(tmp_dir)\n open(f\"test.js\", 'w').write(sample[\"test_code\"])\n try:\n exec_result = None\n with time_limit(timeout):\n # WARNING\n # This program exists to execute untrusted model-generated code. Although\n # it is highly unlikely that model-generated code will do something overtly\n # malicious in response to this test suite, model-generated code may act\n # destructively due to a lack of model capability or alignment.\n # Users are strongly encouraged to sandbox this evaluation suite so that it\n # does not perform destructive actions on their host or network.\n # Once you have read this disclaimer and taken appropriate precautions,\n # uncomment the following line and proceed at your own risk:\n exec_result = subprocess.run([f\"{node_exec}node\", \"test.js\"], timeout=timeout, capture_output=True)\n\n if exec_result.stderr.decode():\n err = exec_result.stderr.decode()\n result.append(f\"failed: {err}\")\n elif exec_result.stdout.decode():\n err = exec_result.stdout.decode()\n result.append(f\"failed: {err}\")\n else:\n result.append(\"passed\")\n\n except TimeoutException:\n result.append(\"timed out\")\n os.chdir(origin_path)\n shutil.rmtree(tmp_dir)\n elif \"cpp\" in language_type.lower():\n import os\n import shutil\n origin_path = os.getcwd()\n if \"tmp\" not in tmp_dir:\n tmp_dir = os.path.join(tmp_dir, \"tmp\")\n tmp_dir = os.path.join(tmp_dir, f\"{task_id.replace('/', '-')}-{random_id}\")\n if not os.path.exists(tmp_dir):\n os.makedirs(tmp_dir)\n\n os.chdir(tmp_dir)\n open(f\"test.cpp\", 'w').write(sample[\"test_code\"])\n if \"162\" in task_id:\n compilation_result = subprocess.run([\"/usr/bin/g++\", \"-std=c++17\", \"test.cpp\", \"-lcrypto\", \"-lssl\"],\n timeout=timeout,\n capture_output=True)\n else:\n compilation_result = subprocess.run([\"/usr/bin/g++\", \"-std=c++17\", \"test.cpp\"], timeout=timeout,\n capture_output=True)\n if compilation_result.returncode != 0:\n if compilation_result.stderr:\n err = compilation_result.stderr.decode()\n else:\n err = compilation_result.stdout.decode()\n result.append(f\"failed: compilation error: {err}\")\n else:\n try:\n exec_result = None\n with time_limit(timeout):\n # WARNING\n # This program exists to execute untrusted model-generated code. Although\n # it is highly unlikely that model-generated code will do something overtly\n # malicious in response to this test suite, model-generated code may act\n # destructively due to a lack of model capability or alignment.\n # Users are strongly encouraged to sandbox this evaluation suite so that it\n # does not perform destructive actions on their host or network.\n # Once you have read this disclaimer and taken appropriate precautions,\n # uncomment the following line and proceed at your own risk:\n exec_result = subprocess.run([\"./a.out\"], timeout=timeout, capture_output=True)\n\n if exec_result.returncode == 0:\n result.append(\"passed\")\n else:\n if exec_result.stderr:\n try:\n err = exec_result.stderr.decode()\n except:\n err = exec_result.stderr\n else:\n try:\n err = exec_result.stdout.decode()\n except:\n err = exec_result.stdout\n result.append(f\"failed: {err}\")\n except TimeoutException:\n result.append(\"timed out\")\n #print(result[-1])\n #print(sample[\"test_code\"])\n os.chdir(origin_path)\n shutil.rmtree(tmp_dir)\n elif \"php\" in language_type.lower():\n import os\n import shutil\n origin_path = os.getcwd()\n if \"tmp\" not in tmp_dir:\n tmp_dir = os.path.join(tmp_dir, \"tmp\")\n tmp_dir = os.path.join(tmp_dir, f\"{task_id.replace('/', '-')}-{random_id}\")\n if not os.path.exists(tmp_dir):\n os.makedirs(tmp_dir)\n\n os.chdir(tmp_dir)\n open(f\"test.php\", 'w').write(sample[\"test_code\"])\n try:\n exec_result = None\n with time_limit(timeout):\n cmd = f\"{php_exec}php -f test.php\"\n exec_result = subprocess.run(cmd, timeout=timeout, capture_output=True, shell=True)\n\n if exec_result.returncode == 0:\n result.append(\"passed\")\n else:\n if exec_result.stderr:\n try:\n err = exec_result.stderr.decode()\n except:\n err = exec_result.stderr\n else:\n try:\n err = exec_result.stdout.decode()\n except:\n err = exec_result.stdout\n result.append(f\"failed: {err}\")\n except TimeoutException:\n result.append(\"timed out\")\n print(result[-1])\n print(sample[\"test_code\"])\n os.chdir(origin_path)\n shutil.rmtree(tmp_dir)\n elif \"sh\" in language_type.lower():\n import os\n import shutil\n origin_path = os.getcwd()\n if \"tmp\" not in tmp_dir:\n tmp_dir = os.path.join(tmp_dir, \"tmp\")\n tmp_dir = os.path.join(tmp_dir, f\"{task_id.replace('/', '-')}-{random_id}\")\n if not os.path.exists(tmp_dir):\n os.makedirs(tmp_dir)\n\n os.chdir(tmp_dir)\n open(f\"test.sh\", 'w').write(sample[\"test_code\"])\n try:\n exec_result = None\n with time_limit(timeout):\n cmd = \"/bin/bash test.sh\"\n exec_result = subprocess.run(cmd, timeout=10, capture_output=True, shell=True)\n\n if exec_result.returncode == 0:\n result.append(\"passed\")\n else:\n if exec_result.stderr:\n try:\n err = exec_result.stderr.decode()\n except:\n err = exec_result.stderr\n else:\n try:\n err = exec_result.stdout.decode()\n except:\n err = exec_result.stdout\n result.append(f\"failed: {err}\")\n except TimeoutException:\n result.append(\"timed out\")\n #print(result[-1])\n #print(sample[\"test_code\"])\n os.chdir(origin_path)\n shutil.rmtree(tmp_dir)\n elif \"ts\" in language_type.lower():\n import os\n import shutil\n origin_path = os.getcwd()\n if \"tmp\" not in tmp_dir:\n tmp_dir = os.path.join(tmp_dir, \"tmp\")\n tmp_dir = os.path.join(tmp_dir, f\"{task_id.replace('/', '-')}-{random_id}\")\n if not os.path.exists(tmp_dir):\n os.makedirs(tmp_dir)\n\n os.chdir(tmp_dir)\n env = {\"PATH\": f\"{node_exec}:\" + subprocess.os.environ[\"PATH\"]}\n open(f\"test.ts\", 'w').write(sample[\"test_code\"])\n cmd = f\"{tsc_exec}tsc test.ts --target ES2015 --lib ES2015,DOM\"\n compilation_result = subprocess.run(cmd, timeout=timeout, capture_output=True, env=env, shell=True)\n if compilation_result.returncode != 0:\n if compilation_result.stderr:\n err = compilation_result.stderr.decode()\n else:\n err = compilation_result.stdout.decode()\n result.append(f\"failed: compilation error: {err}\")\n else:\n try:\n exec_result = None\n with time_limit(timeout):\n exec_result = subprocess.run([f\"{node_exec}node\", \"test.js\"], timeout=timeout, capture_output=True)\n\n if exec_result.returncode == 0:\n result.append(\"passed\")\n else:\n if exec_result.stderr:\n try:\n err = exec_result.stderr.decode()\n except:\n err = exec_result.stderr\n else:\n try:\n err = exec_result.stdout.decode()\n except:\n err = exec_result.stdout\n result.append(f\"failed: {err}\")\n except TimeoutException:\n result.append(\"timed out\")\n if result[-1] != \"passed\":\n env = {\"PATH\": f\"{node_exec}:\" + subprocess.os.environ[\"PATH\"]}\n cmd = f\"{tsc_exec}tsc test.ts\"\n compilation_result = subprocess.run(cmd, timeout=timeout, capture_output=True, env=env, shell=True)\n if compilation_result.returncode != 0:\n if compilation_result.stderr:\n err = compilation_result.stderr.decode()\n else:\n err = compilation_result.stdout.decode()\n result[-1] = f\"failed: compilation error: {err}\"\n else:\n try:\n exec_result = None\n with time_limit(timeout):\n exec_result = subprocess.run([f\"{node_exec}node\", \"test.js\"], timeout=timeout, capture_output=True)\n\n if exec_result.returncode == 0:\n result[-1] = \"passed\"\n else:\n if exec_result.stderr:\n try:\n err = exec_result.stderr.decode()\n except:\n err = exec_result.stderr\n else:\n try:\n err = exec_result.stdout.decode()\n except:\n err = exec_result.stdout\n result[-1] = f\"failed: {err}\"\n except TimeoutException:\n result[-1] = \"timed out\"\n \n os.chdir(origin_path)\n shutil.rmtree(tmp_dir)\n elif \"cs\" in language_type.lower():\n import os\n import shutil\n origin_path = os.getcwd()\n if \"tmp\" not in tmp_dir:\n tmp_dir = os.path.join(tmp_dir, \"tmp\")\n tmp_dir = os.path.join(tmp_dir, f\"{task_id.replace('/', '-')}-{random_id}\")\n if not os.path.exists(tmp_dir):\n os.makedirs(tmp_dir)\n os.chdir(tmp_dir)\n open(f\"Program.cs\", 'w').write(sample[\"test_code\"])\n cmd = f\"{cs_exec}mcs -d:DEBUG Program.cs\"\n compilation_result = subprocess.run(cmd, shell=True, capture_output=True)\n if compilation_result.returncode != 0:\n if compilation_result.stderr:\n err = compilation_result.stderr.decode()\n else:\n err = compilation_result.stdout.decode()\n result.append(f\"failed: compilation error: {err}\")\n else:\n try:\n exec_result = None\n cmd = f\"{cs_exec}mono Program.exe\"\n env = dict(MONO_TRACE_LISTENER=\"Console.Error\")\n with time_limit(timeout):\n exec_result = subprocess.run(cmd, timeout=timeout, shell=True, capture_output=True, env=env)\n\n if \"Fail\" not in exec_result.stderr.decode():\n result.append(\"passed\")\n else:\n if exec_result.stderr:\n try:\n err = exec_result.stderr.decode()\n except:\n err = exec_result.stderr\n else:\n try:\n err = exec_result.stdout.decode()\n except:\n err = exec_result.stdout\n result.append(f\"failed: {err}\")\n except TimeoutException:\n result.append(\"timed out\")\n except Exception as e:\n result.append(f\"failed: {e}\")\n os.chdir(origin_path)\n shutil.rmtree(tmp_dir)\n elif \"rust\" in language_type.lower(): \n import os \n \n WD: str = os.path.dirname(os.path.abspath(__file__))\n RUST_DIR: str = os.path.join(WD, \"rust\")\n RUST_SRC: str = os.path.join(RUST_DIR, \"src\")\n RUST_BIN: str = os.path.join(RUST_SRC, \"bin\")\n RUST_TMP_DIR: str = os.path.join(RUST_DIR, \"tmp\")\n RUST_LOGS: str = os.path.join(RUST_TMP_DIR, \"logs\")\n RUST_EXT: str = \".rs\" \n\n # Create mandatory tmp directories\n os.makedirs(RUST_TMP_DIR, exist_ok=True)\n os.makedirs(RUST_LOGS, exist_ok=True)\n os.makedirs(RUST_SRC, exist_ok=True)\n os.makedirs(RUST_BIN, exist_ok=True)\n\n with tempfile.NamedTemporaryFile(dir = RUST_BIN, delete=False) as f:\n #temporal file name\n file_prefix = sample[\"task_id\"].lower().replace(\"/\", \"_\")\n file_name:str = file_prefix +RUST_EXT\n \n os.rename(f.name, os.path.join(RUST_BIN, file_name))\n \n # Sample to pure Rust function\n rust_code: str = sample[\"test_code\"]\n\n # dump the rust source code in the target temporal file\n f.write(rust_code.encode('utf-8'))\n\n # Proceed towards Rust binaries compilation. Therefore move to Rust module root dir.\n os.chdir(RUST_DIR)\n\n # Two possible outcomes\n # Pass OR Fail compilation\n log_filename: str = file_prefix + \".jsonl\"\n log_path: str = os.path.join(RUST_LOGS, log_filename)\n cargo_check: str = \"cargo check --bin \" + file_prefix + \" --message-format json >> \" + log_path\n # Compilation build status\n returned_val_compilation: int\n \n # Overwrite file content\n if os.path.exists(log_path):\n if(file_size := os.path.getsize(log_path)) >= 0: \n os.remove(log_path)\n returned_val_compilation = os.system(cargo_check)\n\n else: \n returned_val_compilation = os.system(cargo_check)\n\n # 0 means success \n if returned_val_compilation == 0:\n\n #Execution pipeline\n cargo_test: str = \"cargo test --bin \" +file_prefix+ \" --message-format json >> \" + log_path\n returned_val_execution = os.system(cargo_test)\n \n if returned_val_execution == 0:\n result.append(\"passed\")\n else:\n result.append(f\"failed: execution error\") \n\n else:\n result.append(f\"failed: compilation error\")\n\n\n elif \"java\" in language_type.lower():\n assert tmp_dir is not None, \"Java should be evaluated in a temporary dir.\"\n\n import os\n import shutil\n\n if \"tmp\" not in tmp_dir:\n tmp_dir = os.path.join(tmp_dir, \"tmp\")\n tmp_dir = os.path.join(tmp_dir, f\"{task_id.replace('/', '-')}-{random_id}\")\n if not os.path.exists(tmp_dir):\n os.makedirs(tmp_dir)\n open(os.path.join(tmp_dir, \"Problem.java\"), 'w').write(sample[\"test_code\"])\n origin_path = os.getcwd()\n os.system(f\"cp ./javatuples-1.2.jar {tmp_dir}/\")\n os.chdir(tmp_dir)\n res = \"failed: unknown error\"\n compile_returncode = -1\n for _ in range(5):\n try:\n cmd = f\"{java_exec}javac -cp javatuples-1.2.jar Problem.java\"\n compilation_result = subprocess.run(cmd, timeout=60, capture_output=True, shell=True) \n compile_returncode = compilation_result.returncode\n break\n except subprocess.TimeoutExpired as e:\n continue\n if compile_returncode != 0:\n res = \"failed: compilation error\"\n else:\n exec_result = None\n try:\n # WARNING\n # This program exists to execute untrusted model-generated code. Although\n # it is highly unlikely that model-generated code will do something overtly\n # malicious in response to this test suite, model-generated code may act\n # destructively due to a lack of model capability or alignment.\n # Users are strongly encouraged to sandbox this evaluation suite so that it\n # does not perform destructive actions on their host or network.\n # Once you have read this disclaimer and taken appropriate precautions,\n # uncomment the following line and proceed at your own risk:\n cmd = f\"{java_exec}java -ea -cp .:javatuples-1.2.jar Problem\"\n exec_result = subprocess.run(cmd, timeout=timeout, capture_output=True, shell=True) \n if exec_result.returncode == 0:\n res = \"passed\"\n elif exec_result.returncode == 1:\n if \"AssertionError\" in exec_result.stderr.decode('unicode-escape'):\n res = \"failed: wrong answer\"\n else:\n res = f\"failed: {exec_result.stderr.decode()}\"\n except subprocess.TimeoutExpired as e:\n res = \"time out\"\n except BaseException as e:\n res = f\"failed: {e}\"\n\n result.append(res) \n os.chdir(origin_path) \n shutil.rmtree(tmp_dir)\n \n manager = multiprocessing.Manager()\n result = manager.list()\n\n p = multiprocessing.Process(target=unsafe_execute, args=(tmp_dir,))\n p.start()\n p.join(timeout=timeout + 1)\n if p.is_alive():\n p.kill()\n\n if not result:\n result.append(\"timed out\")\n\n return {\n \"task_id\" : task_id,\n \"completion_id\": completion_id,\n \"result\" : result[0],\n \"passed\" : result[0] == \"passed\",\n \"finish\" : -1 if \"finish\" not in sample else sample[\"finish\"],\n \"code\" : sample[\"test_code\"]\n }"
}
] | import os
import sys
import fire
import json
import gzip
import regex
import numpy as np
import itertools
from typing import *
from tqdm.auto import tqdm
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
from .data import stream_jsonl
from .execution import check_correctness | 7,149 | if is_mbpp:
return sample["generation"] + "\n" + "\n".join(problems[task_id]["test"])
prompt = sample["prompt"]
if example_test and "example_test" in problems[task_id] and problems[task_id]["example_test"] != "":
test = problems[task_id]["example_test"]
else:
test = problems[task_id]["test"]
code = sample["generation"]
# Pre-process for different languages
if language == "python":
test_setup = "\n".join(IMPORT_HELPER["python"]) + "\n"
test_string = test_setup + code + "\n" + test + "\n"
elif language == "cpp":
test_set_up = ""
for s in IMPORT_HELPER["cpp"]:
if s not in prompt:
test_set_up += s + "\n"
test_string = test_set_up + "\n" + code + "\n" + test
elif language == "java":
test_string = code + "\n" + test
elif language == "cs":
test_set_up = ""
for s in IMPORT_HELPER["cs"]:
test_set_up += s + "\n"
test_string = test_set_up + "\n" + code + "\n" + test
elif language in ["js", "javascript", "ts", "sh", "go"]:
test_string = code + "\n" + test
elif language == "go232":
import_string = problems[task_id]["import"]
prompt = prompt.replace(import_string, "")
if example_test and "example_test" in problems[task_id]:
test = problems[task_id]["example_test"]
else:
test = problems[task_id]["test"]
test_setup = problems[task_id]["test_setup"]
other_pkgs = []
for pkg in IMPORT_HELPER["go"]:
if pkg not in test_setup:
p = pkg.split("/")[-1]
if p + "." in code:
other_pkgs.append(f"\"{pkg}\"")
if other_pkgs:
import_other_pkgs = "import (\n" + " ".join([p + "\n" for p in other_pkgs]) + ")"
test_string = test_setup + "\n" + import_other_pkgs + "\n" + prompt + code + "\n" + test
else:
test_string = test_setup + "\n" + prompt + code + "\n" + test
elif language == "rust":
main = "\nfn main(){ \n } \n"
declaration = problems[task_id]["declaration"]
test_string = main + declaration + prompt + code + test
elif language == "php":
if code[:5] != "<?php":
code = "<?php\n" + code
test_string = code + "\n" + test + "?>"
return test_string
def stream_jsonl_all(filename: str) -> Iterable[Dict]:
"""
Streams a JSONL file.
"""
results = []
if filename.endswith(".gz"):
fp = gzip.open(open(filename, "rb"), "rt")
else:
fp = open(filename, "r")
for line in fp:
if any(not x.isspace() for x in line):
results.append(json.loads(line))
fp.close()
return results
def evaluate_functional_correctness(
input_file: str = None,
tmp_dir: str = "./",
n_workers: int = 32,
timeout: float = 10.0,
problem_file: str = "../data/humaneval_python.jsonl.gz",
out_dir: str = None,
k: List[int] = [1, 10, 100],
test_groundtruth: bool = False,
example_test: bool = False,
is_mbpp: bool = False,
language: str = "python",
):
"""
Evaluates the functional correctness of a model.
"""
if example_test:
print("Example test...")
problems = read_dataset(problem_file,
dataset_type="humaneval")
sample_jsonl = stream_jsonl_all(input_file)
with ThreadPoolExecutor(max_workers=n_workers) as executor:
futures = []
completion_id = Counter()
n_samples = 0
results = defaultdict(list)
if test_groundtruth:
print("Testing ground truth...")
for sample in tqdm(problems.values()):
task_id = sample["task_id"]
lang = task_id.split("/")[0].lower()
if lang == "javascript":
lang = "js"
tmp_dir_ = os.path.join(tmp_dir, lang, "evaluation")
sample["generation"] = sample["canonical_solution"]
sample["test_code"] = process_humaneval_test(sample, problems, example_test, language)
if sample["test_code"] is None:
continue
args = (task_id, sample, lang, timeout, tmp_dir_, completion_id[task_id])
|
IMPORT_HELPER = {
"python": [
"import math",
"import re",
"import sys",
"import copy",
"import datetime",
"import itertools",
"import collections",
"import heapq",
"import functools",
"import hashlib",
"import numpy",
"import numpy as np",
"import string",
"from typing import *",
"from collections import *",
],
"go" : [
"math",
"strings",
"fmt",
"strconv",
"time",
"bytes",
"regexp",
"sort",
"math/rand",
"crypto/md5",
],
"cpp" : [
"#include<stdlib.h>",
"#include<algorithm>",
"#include<math.h>",
"#include<stdio.h>",
"#include<vector>",
"#include<string>",
"#include<climits>",
"#include<cstring>",
"#include<iostream>",
"#include<cassert>"
],
"cs": ["using System.Numerics;", "using System.Diagnostics;", "using System.Collections.Generic;", "using System.Linq;", "using System.Text;", "using System.Security.Cryptography;", "using System.Collections.Generic;"]
}
LANGUAGE_NAME = {
"cpp" : "CPP",
"go" : "Go",
"java" : "Java",
"js" : "JavaScript",
"python": "Python",
}
def read_dataset(
data_file: str = None,
dataset_type: str = "humaneval",
num_shot=None,
) -> Dict:
"""
Reads a dataset and returns a dictionary of tasks.
"""
if num_shot is not None:
print(f"{num_shot}-shot setting...")
if "humaneval" in dataset_type.lower():
if data_file is None:
current_path = os.path.dirname(os.path.abspath(__file__))
data_file = os.path.join(current_path, "..", "humaneval-x", "python", "data", "humaneval_python.jsonl.gz")
dataset = {task["task_id"]: task for task in stream_jsonl(data_file)}
else:
raise f"Dataset: {dataset_type} not supported."
return dataset
def estimate_pass_at_k(
num_samples: Union[int, List[int], np.ndarray],
num_correct: Union[List[int], np.ndarray],
k: int
) -> np.ndarray:
"""
Estimates pass@k of each problem and returns them in an array.
"""
def estimator(n: int, c: int, k: int) -> float:
"""
Calculates 1 - comb(n - c, k) / comb(n, k).
"""
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
if isinstance(num_samples, int):
num_samples_it = itertools.repeat(num_samples, len(num_correct))
else:
assert len(num_samples) == len(num_correct)
num_samples_it = iter(num_samples)
return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
def process_humaneval_test(sample, problems, example_test=False, is_mbpp=False, language="python"):
"""
Processes a sample for evaluation.
"""
task_id = sample["task_id"]
if is_mbpp:
return sample["generation"] + "\n" + "\n".join(problems[task_id]["test"])
prompt = sample["prompt"]
if example_test and "example_test" in problems[task_id] and problems[task_id]["example_test"] != "":
test = problems[task_id]["example_test"]
else:
test = problems[task_id]["test"]
code = sample["generation"]
# Pre-process for different languages
if language == "python":
test_setup = "\n".join(IMPORT_HELPER["python"]) + "\n"
test_string = test_setup + code + "\n" + test + "\n"
elif language == "cpp":
test_set_up = ""
for s in IMPORT_HELPER["cpp"]:
if s not in prompt:
test_set_up += s + "\n"
test_string = test_set_up + "\n" + code + "\n" + test
elif language == "java":
test_string = code + "\n" + test
elif language == "cs":
test_set_up = ""
for s in IMPORT_HELPER["cs"]:
test_set_up += s + "\n"
test_string = test_set_up + "\n" + code + "\n" + test
elif language in ["js", "javascript", "ts", "sh", "go"]:
test_string = code + "\n" + test
elif language == "go232":
import_string = problems[task_id]["import"]
prompt = prompt.replace(import_string, "")
if example_test and "example_test" in problems[task_id]:
test = problems[task_id]["example_test"]
else:
test = problems[task_id]["test"]
test_setup = problems[task_id]["test_setup"]
other_pkgs = []
for pkg in IMPORT_HELPER["go"]:
if pkg not in test_setup:
p = pkg.split("/")[-1]
if p + "." in code:
other_pkgs.append(f"\"{pkg}\"")
if other_pkgs:
import_other_pkgs = "import (\n" + " ".join([p + "\n" for p in other_pkgs]) + ")"
test_string = test_setup + "\n" + import_other_pkgs + "\n" + prompt + code + "\n" + test
else:
test_string = test_setup + "\n" + prompt + code + "\n" + test
elif language == "rust":
main = "\nfn main(){ \n } \n"
declaration = problems[task_id]["declaration"]
test_string = main + declaration + prompt + code + test
elif language == "php":
if code[:5] != "<?php":
code = "<?php\n" + code
test_string = code + "\n" + test + "?>"
return test_string
def stream_jsonl_all(filename: str) -> Iterable[Dict]:
"""
Streams a JSONL file.
"""
results = []
if filename.endswith(".gz"):
fp = gzip.open(open(filename, "rb"), "rt")
else:
fp = open(filename, "r")
for line in fp:
if any(not x.isspace() for x in line):
results.append(json.loads(line))
fp.close()
return results
def evaluate_functional_correctness(
input_file: str = None,
tmp_dir: str = "./",
n_workers: int = 32,
timeout: float = 10.0,
problem_file: str = "../data/humaneval_python.jsonl.gz",
out_dir: str = None,
k: List[int] = [1, 10, 100],
test_groundtruth: bool = False,
example_test: bool = False,
is_mbpp: bool = False,
language: str = "python",
):
"""
Evaluates the functional correctness of a model.
"""
if example_test:
print("Example test...")
problems = read_dataset(problem_file,
dataset_type="humaneval")
sample_jsonl = stream_jsonl_all(input_file)
with ThreadPoolExecutor(max_workers=n_workers) as executor:
futures = []
completion_id = Counter()
n_samples = 0
results = defaultdict(list)
if test_groundtruth:
print("Testing ground truth...")
for sample in tqdm(problems.values()):
task_id = sample["task_id"]
lang = task_id.split("/")[0].lower()
if lang == "javascript":
lang = "js"
tmp_dir_ = os.path.join(tmp_dir, lang, "evaluation")
sample["generation"] = sample["canonical_solution"]
sample["test_code"] = process_humaneval_test(sample, problems, example_test, language)
if sample["test_code"] is None:
continue
args = (task_id, sample, lang, timeout, tmp_dir_, completion_id[task_id]) | future = executor.submit(check_correctness, *args) | 1 | 2023-10-20 06:38:01+00:00 | 8k |
PKU-YuanGroup/Video-LLaVA | llava/model/language_model/mpt/modeling_mpt.py | [
{
"identifier": "attn_bias_shape",
"path": "llava/model/language_model/mpt/attention.py",
"snippet": "def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n if (prefix_lm or not causal) or use_sequence_id:\n return (1, n_heads, seq_len, seq_len)\n return (1, n_heads, 1, seq_len)\n elif prefix_lm or use_sequence_id:\n return (1, 1, seq_len, seq_len)\n return None\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "build_attn_bias",
"path": "llava/model/language_model/mpt/attention.py",
"snippet": "def build_attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n (device, dtype) = (attn_bias.device, attn_bias.dtype)\n attn_bias = attn_bias.add(build_alibi_bias(n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype))\n return attn_bias\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "MPTBlock",
"path": "llava/model/language_model/mpt/blocks.py",
"snippet": "class MPTBlock(nn.Module):\n\n def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', verbose: int=0, device: Optional[str]=None, **kwargs):\n del kwargs\n super().__init__()\n norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]\n attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]\n self.norm_1 = norm_class(d_model, device=device)\n self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, verbose=verbose, device=device)\n self.norm_2 = norm_class(d_model, device=device)\n self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)\n self.resid_attn_dropout = nn.Dropout(resid_pdrop)\n self.resid_ffn_dropout = nn.Dropout(resid_pdrop)\n\n def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:\n a = self.norm_1(x)\n (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)\n x = x + self.resid_attn_dropout(b)\n m = self.norm_2(x)\n n = self.ffn(m)\n x = x + self.resid_ffn_dropout(n)\n return (x, attn_weights, past_key_value)"
},
{
"identifier": "SharedEmbedding",
"path": "llava/model/language_model/mpt/custom_embedding.py",
"snippet": "class SharedEmbedding(nn.Embedding):\n\n def forward(self, input: Tensor, unembed: bool=False) -> Tensor:\n if unembed:\n return F.linear(input, self.weight)\n return super().forward(input)"
},
{
"identifier": "NORM_CLASS_REGISTRY",
"path": "llava/model/language_model/mpt/norm.py",
"snippet": "NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 'low_precision_layernorm': LPLayerNorm, 'rmsnorm': RMSNorm, 'low_precision_rmsnorm': LPRMSNorm}"
},
{
"identifier": "MPTConfig",
"path": "llava/model/language_model/mpt/configuration_mpt.py",
"snippet": "class MPTConfig(PretrainedConfig):\n model_type = 'mpt'\n\n def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=False, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, **kwargs):\n \"\"\"The MPT configuration class.\n\n Args:\n d_model (int): The size of the embedding dimension of the model.\n n_heads (int): The number of attention heads.\n n_layers (int): The number of layers in the model.\n expansion_ratio (int): The ratio of the up/down scale in the MLP.\n max_seq_len (int): The maximum sequence length of the model.\n vocab_size (int): The size of the vocabulary.\n resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.\n emb_pdrop (float): The dropout probability for the embedding layer.\n learned_pos_emb (bool): Whether to use learned positional embeddings\n attn_config (Dict): A dictionary used to configure the model's attention module:\n attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention\n attn_pdrop (float): The dropout probability for the attention layers.\n attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.\n qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.\n clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to\n this value.\n softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,\n use the default scale of ``1/sqrt(d_keys)``.\n prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an\n extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix\n can attend to one another bi-directionally. Tokens outside the prefix use causal attention.\n attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.\n When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates\n which sub-sequence each token belongs to.\n Defaults to ``False`` meaning any provided `sequence_id` will be ignored.\n alibi (bool): Whether to use the alibi bias instead of position embeddings.\n alibi_bias_max (int): The maximum value of the alibi bias.\n init_device (str): The device to use for parameter initialization.\n logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.\n no_bias (bool): Whether to use bias in all layers.\n verbose (int): The verbosity level. 0 is silent.\n embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.\n norm_type (str): choose type of norm to use\n multiquery_attention (bool): Whether to use multiquery attention implementation.\n use_cache (bool): Whether or not the model should return the last key/values attentions\n init_config (Dict): A dictionary used to configure the model initialization:\n init_config.name: The parameter initialization scheme to use. Options: 'default_', 'baseline_',\n 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or\n 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.\n init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.\n emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.\n emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution\n used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.\n init_std (float): The standard deviation of the normal distribution used to initialize the model,\n if using the baseline_ parameter initialization scheme.\n init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.\n fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.\n init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.\n ---\n See llmfoundry.models.utils.param_init_fns.py for info on other param init config options\n \"\"\"\n self.d_model = d_model\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.expansion_ratio = expansion_ratio\n self.max_seq_len = max_seq_len\n self.vocab_size = vocab_size\n self.resid_pdrop = resid_pdrop\n self.emb_pdrop = emb_pdrop\n self.learned_pos_emb = learned_pos_emb\n self.attn_config = attn_config\n self.init_device = init_device\n self.logit_scale = logit_scale\n self.no_bias = no_bias\n self.verbose = verbose\n self.embedding_fraction = embedding_fraction\n self.norm_type = norm_type\n self.use_cache = use_cache\n self.init_config = init_config\n if 'name' in kwargs:\n del kwargs['name']\n if 'loss_fn' in kwargs:\n del kwargs['loss_fn']\n super().__init__(**kwargs)\n self._validate_config()\n\n def _set_config_defaults(self, config, config_defaults):\n for (k, v) in config_defaults.items():\n if k not in config:\n config[k] = v\n return config\n\n def _validate_config(self):\n self.attn_config = self._set_config_defaults(self.attn_config, attn_config_defaults)\n self.init_config = self._set_config_defaults(self.init_config, init_config_defaults)\n if self.d_model % self.n_heads != 0:\n raise ValueError('d_model must be divisible by n_heads')\n if any((prob < 0 or prob > 1 for prob in [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop])):\n raise ValueError(\"self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1\")\n if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:\n raise ValueError(f\"Unknown attn_impl={self.attn_config['attn_impl']}\")\n if self.attn_config['prefix_lm'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')\n if self.attn_config['alibi'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('alibi only implemented with torch and triton attention.')\n if self.attn_config['attn_uses_sequence_id'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('attn_uses_sequence_id only implemented with torch and triton attention.')\n if self.embedding_fraction > 1 or self.embedding_fraction <= 0:\n raise ValueError('model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!')\n if isinstance(self.logit_scale, str) and self.logit_scale != 'inv_sqrt_d_model':\n raise ValueError(f\"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.\")\n if self.init_config.get('name', None) is None:\n raise ValueError(f\"self.init_config={self.init_config!r} 'name' needs to be set.\")\n if not self.learned_pos_emb and (not self.attn_config['alibi']):\n raise ValueError(f'Positional information must be provided to the model using either learned_pos_emb or alibi.')"
},
{
"identifier": "AutoTokenizerForMOD",
"path": "llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "class AutoTokenizerForMOD(AutoTokenizer):\n \"\"\"AutoTokenizer + Adaptation for MOD.\n\n A simple wrapper around AutoTokenizer to make instantiating\n an MOD-adapted tokenizer a bit easier.\n\n MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),\n a padding token, and a property to get the token ids of the\n sentinel tokens.\n \"\"\"\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n \"\"\"See `AutoTokenizer.from_pretrained` docstring.\"\"\"\n tokenizer = super().from_pretrained(*args, **kwargs)\n adapt_tokenizer_for_denoising(tokenizer)\n return tokenizer"
},
{
"identifier": "adapt_tokenizer_for_denoising",
"path": "llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):\n \"\"\"Adds sentinel tokens and padding token (if missing).\n\n Expands the tokenizer vocabulary to include sentinel tokens\n used in mixture-of-denoiser tasks as well as a padding token.\n\n All added tokens are added as special tokens. No tokens are\n added if sentinel tokens and padding token already exist.\n \"\"\"\n sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]\n tokenizer.add_tokens(sentinels_to_add, special_tokens=True)\n if tokenizer.pad_token is None:\n tokenizer.add_tokens('<pad>', special_tokens=True)\n tokenizer.pad_token = '<pad>'\n assert tokenizer.pad_token_id is not None\n sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])\n _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids\n tokenizer.sentinel_token_ids = _sentinel_token_ids"
},
{
"identifier": "add_bidirectional_mask_if_missing",
"path": "llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):\n \"\"\"Attempts to add bidirectional_mask to batch if missing.\n\n Raises:\n KeyError if bidirectional_mask is missing and can't be inferred\n \"\"\"\n if 'bidirectional_mask' not in batch:\n if batch.get('mode', None) == 'icl_task':\n batch['bidirectional_mask'] = batch['attention_mask'].clone()\n for (i, continuation_indices) in enumerate(batch['continuation_indices']):\n batch['bidirectional_mask'][i, continuation_indices] = 0\n elif 'labels' in batch and 'attention_mask' in batch:\n batch['bidirectional_mask'] = torch.logical_and(torch.eq(batch['attention_mask'], 1), torch.eq(batch['labels'], -100)).type_as(batch['attention_mask'])\n else:\n raise KeyError('No bidirectional_mask in batch and not sure how to construct one.')"
},
{
"identifier": "convert_hf_causal_lm_to_prefix_lm",
"path": "llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:\n \"\"\"Converts a HuggingFace Causal LM to a Prefix LM.\n\n Supported HuggingFace model classes:\n - `GPT2LMHeadModel`\n - `GPTNeoForCausalLM`\n - `GPTNeoXForCausalLM`\n - `GPTJForCausalLM`\n - `BloomForCausalLM`\n - `OPTForCausalLM`\n\n Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the\n `generate` method and/or select underlying methods depending on the model class.\n\n These changes preserve the model API, but add a new input to `forward`: \"bidirectional_mask\".\n\n Notes on training:\n To actually train the converted model as a Prefix LM, training batches will need to indicate\n the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.\n\n **This is not a standard input and requires custom layers either within or after your dataloader.**\n\n In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`\n such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.\n That is, the prefix portion of the sequence should not generate any loss. Loss should only be\n generated by the target portion of the sequence.\n\n Notes on `GPTNeoForCausalLM`:\n To simplify the implementation, \"global\" and \"local\" attention layers are handled differently.\n For \"global\" layers, we handle conversion as described above. For \"local\" layers, which use a\n causal attention mask within a restricted local window, we do not alter the masking.\n\n Notes on `forward` method conversion:\n After conversion, the `forward` method will handle a new input, `bidirectional_mask`,\n which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions\n belonging to the prefix (prefix tokens can attend to one another bidirectionally), and\n 0 indicates token positions belonging to the target.\n\n The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing\n causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset\n the causal masks before returning the result.\n\n Notes on `generate` method conversion:\n After conversion, the `generate` method will have the same signature but will internally\n convert all causal masks to be purely bidirectional, call the original `generate` method, and\n (where appropriate) reset the causal masks before returning the result.\n\n This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token\n \"prompt\" passed to `generate` (which is treated as the prefix) and then sequentially generates\n each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one\n another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and\n previously-generated tokens (also as expected in a Prefix LM).\n\n To preserve the API, the original methods are renamed to `_original_forward` and\n `_original_generate`, and replaced with new `forward` and `generate` methods that wrap\n them, respectively. Although implementation details vary by model class.\n \"\"\"\n if isinstance(model, _SUPPORTED_GPT_MODELS):\n return _convert_gpt_causal_lm_to_prefix_lm(model)\n elif isinstance(model, BloomForCausalLM):\n return _convert_bloom_causal_lm_to_prefix_lm(model)\n elif isinstance(model, OPTForCausalLM):\n return _convert_opt_causal_lm_to_prefix_lm(model)\n else:\n raise TypeError(f'Cannot convert model to Prefix LM. ' + f'Model does not belong to set of supported HF models:' + f'\\n{_SUPPORTED_HF_MODELS}')"
},
{
"identifier": "init_empty_weights",
"path": "llava/model/language_model/mpt/meta_init_context.py",
"snippet": "@contextmanager\ndef init_empty_weights(include_buffers: bool=False):\n \"\"\"Meta initialization context manager.\n\n A context manager under which models are initialized with all parameters\n on the meta device, therefore creating an empty model. Useful when just\n initializing the model would blow the available RAM.\n\n Args:\n include_buffers (`bool`, *optional*, defaults to `False`): Whether or\n not to also put all buffers on the meta device while initializing.\n\n Example:\n ```python\n import torch.nn as nn\n\n # Initialize a model with 100 billions parameters in no time and without using any RAM.\n with init_empty_weights():\n tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n ```\n\n <Tip warning={true}>\n\n Any model created under this context manager has no weights. As such you can't do something like\n `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].\n\n </Tip>\n \"\"\"\n with init_on_device(torch.device('meta'), include_buffers=include_buffers) as f:\n yield f"
},
{
"identifier": "MODEL_INIT_REGISTRY",
"path": "llava/model/language_model/mpt/param_init_fns.py",
"snippet": "MODEL_INIT_REGISTRY = {'default_': torch_default_param_init_fn_, 'baseline_': baseline_param_init_fn_, 'kaiming_uniform_': kaiming_uniform_param_init_fn_, 'kaiming_normal_': kaiming_normal_param_init_fn_, 'neox_init_': neox_param_init_fn_, 'small_init_': small_param_init_fn_, 'xavier_uniform_': xavier_uniform_param_init_fn_, 'xavier_normal_': xavier_normal_param_init_fn_}"
},
{
"identifier": "generic_param_init_fn_",
"path": "llava/model/language_model/mpt/param_init_fns.py",
"snippet": "def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):\n del kwargs\n if verbose > 1:\n warnings.warn(f'If model has bias parameters they are initialized to 0.')\n init_div_is_residual = init_div_is_residual\n if init_div_is_residual is False:\n div_is_residual = 1.0\n elif init_div_is_residual is True:\n div_is_residual = math.sqrt(2 * n_layers)\n elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):\n div_is_residual = init_div_is_residual\n elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():\n div_is_residual = float(init_div_is_residual)\n else:\n div_is_residual = 1.0\n raise ValueError(f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}')\n if init_div_is_residual is not False:\n if verbose > 1:\n warnings.warn(f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' + f'Set `init_div_is_residual: false` in init config to disable this.')\n if isinstance(module, nn.Linear):\n if hasattr(module, '_fused'):\n fused_init_helper_(module, init_fn_)\n else:\n init_fn_(module.weight)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n if init_div_is_residual is not False and getattr(module, '_is_residual', False):\n with torch.no_grad():\n module.weight.div_(div_is_residual)\n elif isinstance(module, nn.Embedding):\n if emb_init_std is not None:\n std = emb_init_std\n if std == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using normal distribution with mean=0 and std={std!r}.')\n elif emb_init_uniform_lim is not None:\n lim = emb_init_uniform_lim\n if isinstance(lim, Sequence):\n if len(lim) > 2:\n raise ValueError(f'Uniform init requires a min and a max limit. User input: {lim}.')\n if lim[0] == lim[1]:\n warnings.warn(f'Embedding layer initialized to {lim[0]}.')\n else:\n if lim == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n lim = [-lim, lim]\n (a, b) = lim\n emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using uniform distribution in range {lim}.')\n else:\n emb_init_fn_ = init_fn_\n emb_init_fn_(module.weight)\n elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):\n if verbose > 1:\n warnings.warn(f'Norm weights are set to 1. If norm layer has a bias it is initialized to 0.')\n if hasattr(module, 'weight') and module.weight is not None:\n torch.nn.init.ones_(module.weight)\n if hasattr(module, 'bias') and module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.MultiheadAttention):\n if module._qkv_same_embed_dim:\n assert module.in_proj_weight is not None\n assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)\n assert d_model is not None\n _d = d_model\n splits = (0, _d, 2 * _d, 3 * _d)\n for (s, e) in zip(splits[:-1], splits[1:]):\n init_fn_(module.in_proj_weight[s:e])\n else:\n assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)\n assert module.in_proj_weight is None\n init_fn_(module.q_proj_weight)\n init_fn_(module.k_proj_weight)\n init_fn_(module.v_proj_weight)\n if module.in_proj_bias is not None:\n torch.nn.init.zeros_(module.in_proj_bias)\n if module.bias_k is not None:\n torch.nn.init.zeros_(module.bias_k)\n if module.bias_v is not None:\n torch.nn.init.zeros_(module.bias_v)\n init_fn_(module.out_proj.weight)\n if init_div_is_residual is not False and getattr(module.out_proj, '_is_residual', False):\n with torch.no_grad():\n module.out_proj.weight.div_(div_is_residual)\n if module.out_proj.bias is not None:\n torch.nn.init.zeros_(module.out_proj.bias)\n else:\n for _ in module.parameters(recurse=False):\n raise NotImplementedError(f'{module.__class__.__name__} parameters are not initialized by param_init_fn.')"
}
] | import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Union
from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from .attention import attn_bias_shape, build_attn_bias
from .blocks import MPTBlock
from .custom_embedding import SharedEmbedding
from .norm import NORM_CLASS_REGISTRY
from .configuration_mpt import MPTConfig
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
from .meta_init_context import init_empty_weights
from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
from .flash_attn_triton import flash_attn_func | 6,754 | """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
| """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta' | if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys(): | 4 | 2023-10-23 05:43:54+00:00 | 8k |
deepseek-ai/DreamCraft3D | threestudio/models/guidance/stable_diffusion_guidance.py | [
{
"identifier": "PromptProcessorOutput",
"path": "threestudio/models/prompt_processors/base.py",
"snippet": "class PromptProcessorOutput:\n text_embeddings: Float[Tensor, \"N Nf\"]\n uncond_text_embeddings: Float[Tensor, \"N Nf\"]\n text_embeddings_vd: Float[Tensor, \"Nv N Nf\"]\n uncond_text_embeddings_vd: Float[Tensor, \"Nv N Nf\"]\n directions: List[DirectionConfig]\n direction2idx: Dict[str, int]\n use_perp_neg: bool\n perp_neg_f_sb: Tuple[float, float, float]\n perp_neg_f_fsb: Tuple[float, float, float]\n perp_neg_f_fs: Tuple[float, float, float]\n perp_neg_f_sf: Tuple[float, float, float]\n\n def get_text_embeddings(\n self,\n elevation: Float[Tensor, \"B\"],\n azimuth: Float[Tensor, \"B\"],\n camera_distances: Float[Tensor, \"B\"],\n view_dependent_prompting: bool = True,\n ) -> Float[Tensor, \"BB N Nf\"]:\n batch_size = elevation.shape[0]\n\n if view_dependent_prompting:\n # Get direction\n direction_idx = torch.zeros_like(elevation, dtype=torch.long)\n for d in self.directions:\n direction_idx[\n d.condition(elevation, azimuth, camera_distances)\n ] = self.direction2idx[d.name]\n\n # Get text embeddings\n text_embeddings = self.text_embeddings_vd[direction_idx] # type: ignore\n uncond_text_embeddings = self.uncond_text_embeddings_vd[direction_idx] # type: ignore\n else:\n text_embeddings = self.text_embeddings.expand(batch_size, -1, -1) # type: ignore\n uncond_text_embeddings = self.uncond_text_embeddings.expand( # type: ignore\n batch_size, -1, -1\n )\n\n # IMPORTANT: we return (cond, uncond), which is in different order than other implementations!\n return torch.cat([text_embeddings, uncond_text_embeddings], dim=0)\n\n def get_text_embeddings_perp_neg(\n self,\n elevation: Float[Tensor, \"B\"],\n azimuth: Float[Tensor, \"B\"],\n camera_distances: Float[Tensor, \"B\"],\n view_dependent_prompting: bool = True,\n ) -> Tuple[Float[Tensor, \"BBBB N Nf\"], Float[Tensor, \"B 2\"]]:\n assert (\n view_dependent_prompting\n ), \"Perp-Neg only works with view-dependent prompting\"\n\n batch_size = elevation.shape[0]\n\n direction_idx = torch.zeros_like(elevation, dtype=torch.long)\n for d in self.directions:\n direction_idx[\n d.condition(elevation, azimuth, camera_distances)\n ] = self.direction2idx[d.name]\n # 0 - side view\n # 1 - front view\n # 2 - back view\n # 3 - overhead view\n\n pos_text_embeddings = []\n neg_text_embeddings = []\n neg_guidance_weights = []\n uncond_text_embeddings = []\n\n side_emb = self.text_embeddings_vd[0]\n front_emb = self.text_embeddings_vd[1]\n back_emb = self.text_embeddings_vd[2]\n overhead_emb = self.text_embeddings_vd[3]\n\n for idx, ele, azi, dis in zip(\n direction_idx, elevation, azimuth, camera_distances\n ):\n azi = shift_azimuth_deg(azi) # to (-180, 180)\n uncond_text_embeddings.append(\n self.uncond_text_embeddings_vd[idx]\n ) # should be \"\"\n if idx.item() == 3: # overhead view\n pos_text_embeddings.append(overhead_emb) # side view\n # dummy\n neg_text_embeddings += [\n self.uncond_text_embeddings_vd[idx],\n self.uncond_text_embeddings_vd[idx],\n ]\n neg_guidance_weights += [0.0, 0.0]\n else: # interpolating views\n if torch.abs(azi) < 90:\n # front-side interpolation\n # 0 - complete side, 1 - complete front\n r_inter = 1 - torch.abs(azi) / 90\n pos_text_embeddings.append(\n r_inter * front_emb + (1 - r_inter) * side_emb\n )\n neg_text_embeddings += [front_emb, side_emb]\n neg_guidance_weights += [\n -shifted_expotional_decay(*self.perp_neg_f_fs, r_inter),\n -shifted_expotional_decay(*self.perp_neg_f_sf, 1 - r_inter),\n ]\n else:\n # side-back interpolation\n # 0 - complete back, 1 - complete side\n r_inter = 2.0 - torch.abs(azi) / 90\n pos_text_embeddings.append(\n r_inter * side_emb + (1 - r_inter) * back_emb\n )\n neg_text_embeddings += [side_emb, front_emb]\n neg_guidance_weights += [\n -shifted_expotional_decay(*self.perp_neg_f_sb, r_inter),\n -shifted_expotional_decay(*self.perp_neg_f_fsb, r_inter),\n ]\n\n text_embeddings = torch.cat(\n [\n torch.stack(pos_text_embeddings, dim=0),\n torch.stack(uncond_text_embeddings, dim=0),\n torch.stack(neg_text_embeddings, dim=0),\n ],\n dim=0,\n )\n\n return text_embeddings, torch.as_tensor(\n neg_guidance_weights, device=elevation.device\n ).reshape(batch_size, 2)"
},
{
"identifier": "BaseObject",
"path": "threestudio/utils/base.py",
"snippet": "class BaseObject(Updateable):\n @dataclass\n class Config:\n pass\n\n cfg: Config # add this to every subclass of BaseObject to enable static type checking\n\n def __init__(\n self, cfg: Optional[Union[dict, DictConfig]] = None, *args, **kwargs\n ) -> None:\n super().__init__()\n self.cfg = parse_structured(self.Config, cfg)\n self.device = get_device()\n self.configure(*args, **kwargs)\n\n def configure(self, *args, **kwargs) -> None:\n pass"
},
{
"identifier": "C",
"path": "threestudio/utils/misc.py",
"snippet": "def C(value: Any, epoch: int, global_step: int) -> float:\n if isinstance(value, int) or isinstance(value, float):\n pass\n else:\n value = config_to_primitive(value)\n if not isinstance(value, list):\n raise TypeError(\"Scalar specification only supports list, got\", type(value))\n if len(value) == 3:\n value = [0] + value\n if len(value) >= 6:\n select_i = 3\n for i in range(3, len(value) - 2, 2):\n if global_step >= value[i]:\n select_i = i + 2\n if select_i != 3:\n start_value, start_step = value[select_i - 3], value[select_i - 2]\n else:\n start_step, start_value = value[:2]\n end_value, end_step = value[select_i - 1], value[select_i]\n value = [start_step, start_value, end_value, end_step]\n assert len(value) == 4\n start_step, start_value, end_value, end_step = value\n if isinstance(end_step, int):\n current_step = global_step\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n elif isinstance(end_step, float):\n current_step = epoch\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n return value"
},
{
"identifier": "cleanup",
"path": "threestudio/utils/misc.py",
"snippet": "def cleanup():\n gc.collect()\n torch.cuda.empty_cache()\n tcnn.free_temporary_memory()"
},
{
"identifier": "parse_version",
"path": "threestudio/utils/misc.py",
"snippet": "def parse_version(ver: str):\n return version.parse(ver)"
},
{
"identifier": "perpendicular_component",
"path": "threestudio/utils/ops.py",
"snippet": "def perpendicular_component(x: Float[Tensor, \"B C H W\"], y: Float[Tensor, \"B C H W\"]):\n # get the component of x that is perpendicular to y\n eps = torch.ones_like(x[:, 0, 0, 0]) * 1e-6\n return (\n x\n - (\n torch.mul(x, y).sum(dim=[1, 2, 3])\n / torch.maximum(torch.mul(y, y).sum(dim=[1, 2, 3]), eps)\n ).view(-1, 1, 1, 1)\n * y\n )"
}
] | from dataclasses import dataclass, field
from diffusers import DDIMScheduler, DDPMScheduler, StableDiffusionPipeline
from diffusers.utils.import_utils import is_xformers_available
from tqdm import tqdm
from threestudio.models.prompt_processors.base import PromptProcessorOutput
from threestudio.utils.base import BaseObject
from threestudio.utils.misc import C, cleanup, parse_version
from threestudio.utils.ops import perpendicular_component
from threestudio.utils.typing import *
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
import tomesd | 5,010 | latent_model_input,
torch.cat([t.reshape(1)] * 4).to(self.device),
encoder_hidden_states=text_embeddings,
) # (4B, 3, 64, 64)
noise_pred_text = noise_pred[:batch_size]
noise_pred_uncond = noise_pred[batch_size : batch_size * 2]
noise_pred_neg = noise_pred[batch_size * 2 :]
e_pos = noise_pred_text - noise_pred_uncond
accum_grad = 0
n_negative_prompts = neg_guidance_weights.shape[-1]
for i in range(n_negative_prompts):
e_i_neg = noise_pred_neg[i::n_negative_prompts] - noise_pred_uncond
accum_grad += neg_guidance_weights[:, i].view(
-1, 1, 1, 1
) * perpendicular_component(e_i_neg, e_pos)
noise_pred = noise_pred_uncond + self.cfg.guidance_scale * (
e_pos + accum_grad
)
else:
# pred noise
latent_model_input = torch.cat([latents_noisy] * 2, dim=0)
noise_pred = self.forward_unet(
latent_model_input,
torch.cat([t.reshape(1)] * 2).to(self.device),
encoder_hidden_states=text_embeddings,
)
# perform guidance (high scale from paper!)
noise_pred_text, noise_pred_uncond = noise_pred.chunk(2)
noise_pred = noise_pred_text + self.cfg.guidance_scale * (
noise_pred_text - noise_pred_uncond
)
return noise_pred
@torch.cuda.amp.autocast(enabled=False)
@torch.no_grad()
def guidance_eval(
self,
t_orig,
text_embeddings,
latents_noisy,
noise_pred,
use_perp_neg=False,
neg_guidance_weights=None,
):
# use only 50 timesteps, and find nearest of those to t
self.scheduler.set_timesteps(50)
self.scheduler.timesteps_gpu = self.scheduler.timesteps.to(self.device)
bs = (
min(self.cfg.max_items_eval, latents_noisy.shape[0])
if self.cfg.max_items_eval > 0
else latents_noisy.shape[0]
) # batch size
large_enough_idxs = self.scheduler.timesteps_gpu.expand([bs, -1]) > t_orig[
:bs
].unsqueeze(
-1
) # sized [bs,50] > [bs,1]
idxs = torch.min(large_enough_idxs, dim=1)[1]
t = self.scheduler.timesteps_gpu[idxs]
fracs = list((t / self.scheduler.config.num_train_timesteps).cpu().numpy())
imgs_noisy = self.decode_latents(latents_noisy[:bs]).permute(0, 2, 3, 1)
# get prev latent
latents_1step = []
pred_1orig = []
for b in range(bs):
step_output = self.scheduler.step(
noise_pred[b : b + 1], t[b], latents_noisy[b : b + 1], eta=1
)
latents_1step.append(step_output["prev_sample"])
pred_1orig.append(step_output["pred_original_sample"])
latents_1step = torch.cat(latents_1step)
pred_1orig = torch.cat(pred_1orig)
imgs_1step = self.decode_latents(latents_1step).permute(0, 2, 3, 1)
imgs_1orig = self.decode_latents(pred_1orig).permute(0, 2, 3, 1)
latents_final = []
for b, i in enumerate(idxs):
latents = latents_1step[b : b + 1]
text_emb = (
text_embeddings[
[b, b + len(idxs), b + 2 * len(idxs), b + 3 * len(idxs)], ...
]
if use_perp_neg
else text_embeddings[[b, b + len(idxs)], ...]
)
neg_guid = neg_guidance_weights[b : b + 1] if use_perp_neg else None
for t in tqdm(self.scheduler.timesteps[i + 1 :], leave=False):
# pred noise
noise_pred = self.get_noise_pred(
latents, t, text_emb, use_perp_neg, neg_guid
)
# get prev latent
latents = self.scheduler.step(noise_pred, t, latents, eta=1)[
"prev_sample"
]
latents_final.append(latents)
latents_final = torch.cat(latents_final)
imgs_final = self.decode_latents(latents_final).permute(0, 2, 3, 1)
return {
"bs": bs,
"noise_levels": fracs,
"imgs_noisy": imgs_noisy,
"imgs_1step": imgs_1step,
"imgs_1orig": imgs_1orig,
"imgs_final": imgs_final,
}
def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
# clip grad for stable training as demonstrated in
# Debiasing Scores and Prompts of 2D Diffusion for Robust Text-to-3D Generation
# http://arxiv.org/abs/2303.15413
if self.cfg.grad_clip is not None:
|
@threestudio.register("stable-diffusion-guidance")
class StableDiffusionGuidance(BaseObject):
@dataclass
class Config(BaseObject.Config):
cache_dir: Optional[str] = None
local_files_only: Optional[bool] = False
pretrained_model_name_or_path: str = "runwayml/stable-diffusion-v1-5"
enable_memory_efficient_attention: bool = False
enable_sequential_cpu_offload: bool = False
enable_attention_slicing: bool = False
enable_channels_last_format: bool = False
guidance_scale: float = 100.0
grad_clip: Optional[
Any
] = None # field(default_factory=lambda: [0, 2.0, 8.0, 1000])
time_prior: Optional[Any] = None # [w1,w2,s1,s2]
half_precision_weights: bool = True
min_step_percent: float = 0.02
max_step_percent: float = 0.98
max_step_percent_annealed: float = 0.5
anneal_start_step: Optional[int] = None
use_sjc: bool = False
var_red: bool = True
weighting_strategy: str = "sds"
token_merging: bool = False
token_merging_params: Optional[dict] = field(default_factory=dict)
view_dependent_prompting: bool = True
"""Maximum number of batch items to evaluate guidance for (for debugging) and to save on disk. -1 means save all items."""
max_items_eval: int = 4
cfg: Config
def configure(self) -> None:
threestudio.info(f"Loading Stable Diffusion ...")
self.weights_dtype = (
torch.float16 if self.cfg.half_precision_weights else torch.float32
)
pipe_kwargs = {
"tokenizer": None,
"safety_checker": None,
"feature_extractor": None,
"requires_safety_checker": False,
"torch_dtype": self.weights_dtype,
"cache_dir": self.cfg.cache_dir,
"local_files_only": self.cfg.local_files_only
}
self.pipe = StableDiffusionPipeline.from_pretrained(
self.cfg.pretrained_model_name_or_path,
**pipe_kwargs,
).to(self.device)
if self.cfg.enable_memory_efficient_attention:
if parse_version(torch.__version__) >= parse_version("2"):
threestudio.info(
"PyTorch2.0 uses memory efficient attention by default."
)
elif not is_xformers_available():
threestudio.warn(
"xformers is not available, memory efficient attention is not enabled."
)
else:
self.pipe.enable_xformers_memory_efficient_attention()
if self.cfg.enable_sequential_cpu_offload:
self.pipe.enable_sequential_cpu_offload()
if self.cfg.enable_attention_slicing:
self.pipe.enable_attention_slicing(1)
if self.cfg.enable_channels_last_format:
self.pipe.unet.to(memory_format=torch.channels_last)
del self.pipe.text_encoder
cleanup()
# Create model
self.vae = self.pipe.vae.eval()
self.unet = self.pipe.unet.eval()
for p in self.vae.parameters():
p.requires_grad_(False)
for p in self.unet.parameters():
p.requires_grad_(False)
if self.cfg.token_merging:
tomesd.apply_patch(self.unet, **self.cfg.token_merging_params)
if self.cfg.use_sjc:
# score jacobian chaining use DDPM
self.scheduler = DDPMScheduler.from_pretrained(
self.cfg.pretrained_model_name_or_path,
subfolder="scheduler",
torch_dtype=self.weights_dtype,
beta_start=0.00085,
beta_end=0.0120,
beta_schedule="scaled_linear",
cache_dir=self.cfg.cache_dir,
)
else:
self.scheduler = DDIMScheduler.from_pretrained(
self.cfg.pretrained_model_name_or_path,
subfolder="scheduler",
torch_dtype=self.weights_dtype,
cache_dir=self.cfg.cache_dir,
local_files_only=self.cfg.local_files_only,
)
self.num_train_timesteps = self.scheduler.config.num_train_timesteps
self.set_min_max_steps() # set to default value
if self.cfg.time_prior is not None:
m1, m2, s1, s2 = self.cfg.time_prior
weights = torch.cat(
(
torch.exp(
-((torch.arange(self.num_train_timesteps, m1, -1) - m1) ** 2)
/ (2 * s1**2)
),
torch.ones(m1 - m2 + 1),
torch.exp(
-((torch.arange(m2 - 1, 0, -1) - m2) ** 2) / (2 * s2**2)
),
)
)
weights = weights / torch.sum(weights)
self.time_prior_acc_weights = torch.cumsum(weights, dim=0)
self.alphas: Float[Tensor, "..."] = self.scheduler.alphas_cumprod.to(
self.device
)
if self.cfg.use_sjc:
# score jacobian chaining need mu
self.us: Float[Tensor, "..."] = torch.sqrt((1 - self.alphas) / self.alphas)
self.grad_clip_val: Optional[float] = None
threestudio.info(f"Loaded Stable Diffusion!")
@torch.cuda.amp.autocast(enabled=False)
def set_min_max_steps(self, min_step_percent=0.02, max_step_percent=0.98):
self.min_step = int(self.num_train_timesteps * min_step_percent)
self.max_step = int(self.num_train_timesteps * max_step_percent)
@torch.cuda.amp.autocast(enabled=False)
def forward_unet(
self,
latents: Float[Tensor, "..."],
t: Float[Tensor, "..."],
encoder_hidden_states: Float[Tensor, "..."],
) -> Float[Tensor, "..."]:
input_dtype = latents.dtype
return self.unet(
latents.to(self.weights_dtype),
t.to(self.weights_dtype),
encoder_hidden_states=encoder_hidden_states.to(self.weights_dtype),
).sample.to(input_dtype)
@torch.cuda.amp.autocast(enabled=False)
def encode_images(
self, imgs: Float[Tensor, "B 3 512 512"]
) -> Float[Tensor, "B 4 64 64"]:
input_dtype = imgs.dtype
imgs = imgs * 2.0 - 1.0
posterior = self.vae.encode(imgs.to(self.weights_dtype)).latent_dist
latents = posterior.sample() * self.vae.config.scaling_factor
return latents.to(input_dtype)
@torch.cuda.amp.autocast(enabled=False)
def decode_latents(
self,
latents: Float[Tensor, "B 4 H W"],
latent_height: int = 64,
latent_width: int = 64,
) -> Float[Tensor, "B 3 512 512"]:
input_dtype = latents.dtype
latents = F.interpolate(
latents, (latent_height, latent_width), mode="bilinear", align_corners=False
)
latents = 1 / self.vae.config.scaling_factor * latents
image = self.vae.decode(latents.to(self.weights_dtype)).sample
image = (image * 0.5 + 0.5).clamp(0, 1)
return image.to(input_dtype)
def compute_grad_sds(
self,
latents: Float[Tensor, "B 4 64 64"],
t: Int[Tensor, "B"],
prompt_utils: PromptProcessorOutput,
elevation: Float[Tensor, "B"],
azimuth: Float[Tensor, "B"],
camera_distances: Float[Tensor, "B"],
):
batch_size = elevation.shape[0]
if prompt_utils.use_perp_neg:
(
text_embeddings,
neg_guidance_weights,
) = prompt_utils.get_text_embeddings_perp_neg(
elevation, azimuth, camera_distances, self.cfg.view_dependent_prompting
)
with torch.no_grad():
noise = torch.randn_like(latents)
latents_noisy = self.scheduler.add_noise(latents, noise, t)
latent_model_input = torch.cat([latents_noisy] * 4, dim=0)
noise_pred = self.forward_unet(
latent_model_input,
torch.cat([t] * 4),
encoder_hidden_states=text_embeddings,
) # (4B, 3, 64, 64)
noise_pred_text = noise_pred[:batch_size]
noise_pred_uncond = noise_pred[batch_size : batch_size * 2]
noise_pred_neg = noise_pred[batch_size * 2 :]
e_pos = noise_pred_text - noise_pred_uncond
accum_grad = 0
n_negative_prompts = neg_guidance_weights.shape[-1]
for i in range(n_negative_prompts):
e_i_neg = noise_pred_neg[i::n_negative_prompts] - noise_pred_uncond
accum_grad += neg_guidance_weights[:, i].view(
-1, 1, 1, 1
) * perpendicular_component(e_i_neg, e_pos)
noise_pred = noise_pred_uncond + self.cfg.guidance_scale * (
e_pos + accum_grad
)
else:
neg_guidance_weights = None
text_embeddings = prompt_utils.get_text_embeddings(
elevation, azimuth, camera_distances, self.cfg.view_dependent_prompting
)
# predict the noise residual with unet, NO grad!
with torch.no_grad():
# add noise
noise = torch.randn_like(latents) # TODO: use torch generator
latents_noisy = self.scheduler.add_noise(latents, noise, t)
# pred noise
latent_model_input = torch.cat([latents_noisy] * 2, dim=0)
noise_pred = self.forward_unet(
latent_model_input,
torch.cat([t] * 2),
encoder_hidden_states=text_embeddings,
)
# perform guidance (high scale from paper!)
noise_pred_text, noise_pred_uncond = noise_pred.chunk(2)
noise_pred = noise_pred_text + self.cfg.guidance_scale * (
noise_pred_text - noise_pred_uncond
)
if self.cfg.weighting_strategy == "sds":
# w(t), sigma_t^2
w = (1 - self.alphas[t]).view(-1, 1, 1, 1)
elif self.cfg.weighting_strategy == "uniform":
w = 1
elif self.cfg.weighting_strategy == "fantasia3d":
w = (self.alphas[t] ** 0.5 * (1 - self.alphas[t])).view(-1, 1, 1, 1)
else:
raise ValueError(
f"Unknown weighting strategy: {self.cfg.weighting_strategy}"
)
grad = w * (noise_pred - noise)
guidance_eval_utils = {
"use_perp_neg": prompt_utils.use_perp_neg,
"neg_guidance_weights": neg_guidance_weights,
"text_embeddings": text_embeddings,
"t_orig": t,
"latents_noisy": latents_noisy,
"noise_pred": noise_pred,
}
return grad, guidance_eval_utils
def compute_grad_sjc(
self,
latents: Float[Tensor, "B 4 64 64"],
t: Int[Tensor, "B"],
prompt_utils: PromptProcessorOutput,
elevation: Float[Tensor, "B"],
azimuth: Float[Tensor, "B"],
camera_distances: Float[Tensor, "B"],
):
batch_size = elevation.shape[0]
sigma = self.us[t]
sigma = sigma.view(-1, 1, 1, 1)
if prompt_utils.use_perp_neg:
(
text_embeddings,
neg_guidance_weights,
) = prompt_utils.get_text_embeddings_perp_neg(
elevation, azimuth, camera_distances, self.cfg.view_dependent_prompting
)
with torch.no_grad():
noise = torch.randn_like(latents)
y = latents
zs = y + sigma * noise
scaled_zs = zs / torch.sqrt(1 + sigma**2)
# pred noise
latent_model_input = torch.cat([scaled_zs] * 4, dim=0)
noise_pred = self.forward_unet(
latent_model_input,
torch.cat([t] * 4),
encoder_hidden_states=text_embeddings,
) # (4B, 3, 64, 64)
noise_pred_text = noise_pred[:batch_size]
noise_pred_uncond = noise_pred[batch_size : batch_size * 2]
noise_pred_neg = noise_pred[batch_size * 2 :]
e_pos = noise_pred_text - noise_pred_uncond
accum_grad = 0
n_negative_prompts = neg_guidance_weights.shape[-1]
for i in range(n_negative_prompts):
e_i_neg = noise_pred_neg[i::n_negative_prompts] - noise_pred_uncond
accum_grad += neg_guidance_weights[:, i].view(
-1, 1, 1, 1
) * perpendicular_component(e_i_neg, e_pos)
noise_pred = noise_pred_uncond + self.cfg.guidance_scale * (
e_pos + accum_grad
)
else:
neg_guidance_weights = None
text_embeddings = prompt_utils.get_text_embeddings(
elevation, azimuth, camera_distances, self.cfg.view_dependent_prompting
)
# predict the noise residual with unet, NO grad!
with torch.no_grad():
# add noise
noise = torch.randn_like(latents) # TODO: use torch generator
y = latents
zs = y + sigma * noise
scaled_zs = zs / torch.sqrt(1 + sigma**2)
# pred noise
latent_model_input = torch.cat([scaled_zs] * 2, dim=0)
noise_pred = self.forward_unet(
latent_model_input,
torch.cat([t] * 2),
encoder_hidden_states=text_embeddings,
)
# perform guidance (high scale from paper!)
noise_pred_text, noise_pred_uncond = noise_pred.chunk(2)
noise_pred = noise_pred_text + self.cfg.guidance_scale * (
noise_pred_text - noise_pred_uncond
)
Ds = zs - sigma * noise_pred
if self.cfg.var_red:
grad = -(Ds - y) / sigma
else:
grad = -(Ds - zs) / sigma
guidance_eval_utils = {
"use_perp_neg": prompt_utils.use_perp_neg,
"neg_guidance_weights": neg_guidance_weights,
"text_embeddings": text_embeddings,
"t_orig": t,
"latents_noisy": scaled_zs,
"noise_pred": noise_pred,
}
return grad, guidance_eval_utils
def __call__(
self,
rgb: Float[Tensor, "B H W C"],
prompt_utils: PromptProcessorOutput,
elevation: Float[Tensor, "B"],
azimuth: Float[Tensor, "B"],
camera_distances: Float[Tensor, "B"],
rgb_as_latents=False,
guidance_eval=False,
current_step_ratio=None,
**kwargs,
):
batch_size = rgb.shape[0]
rgb_BCHW = rgb.permute(0, 3, 1, 2)
latents: Float[Tensor, "B 4 64 64"]
if rgb_as_latents:
latents = F.interpolate(
rgb_BCHW, (64, 64), mode="bilinear", align_corners=False
)
else:
rgb_BCHW_512 = F.interpolate(
rgb_BCHW, (512, 512), mode="bilinear", align_corners=False
)
# encode image into latents with vae
latents = self.encode_images(rgb_BCHW_512)
if self.cfg.time_prior is not None:
time_index = torch.where(
(self.time_prior_acc_weights - current_step_ratio) > 0
)[0][0]
if time_index == 0 or torch.abs(
self.time_prior_acc_weights[time_index] - current_step_ratio
) < torch.abs(
self.time_prior_acc_weights[time_index - 1] - current_step_ratio
):
t = self.num_train_timesteps - time_index
else:
t = self.num_train_timesteps - time_index + 1
t = torch.clip(t, self.min_step, self.max_step + 1)
t = torch.full((batch_size,), t, dtype=torch.long, device=self.device)
else:
# timestep ~ U(0.02, 0.98) to avoid very high/low noise level
t = torch.randint(
self.min_step,
self.max_step + 1,
[batch_size],
dtype=torch.long,
device=self.device,
)
if self.cfg.use_sjc:
grad, guidance_eval_utils = self.compute_grad_sjc(
latents, t, prompt_utils, elevation, azimuth, camera_distances
)
else:
grad, guidance_eval_utils = self.compute_grad_sds(
latents, t, prompt_utils, elevation, azimuth, camera_distances
)
grad = torch.nan_to_num(grad)
# clip grad for stable training?
if self.grad_clip_val is not None:
grad = grad.clamp(-self.grad_clip_val, self.grad_clip_val)
# loss = SpecifyGradient.apply(latents, grad)
# SpecifyGradient is not straghtforward, use a reparameterization trick instead
target = (latents - grad).detach()
# d(loss)/d(latents) = latents - target = latents - (latents - grad) = grad
loss_sds = 0.5 * F.mse_loss(latents, target, reduction="sum") / batch_size
guidance_out = {
"loss_sd": loss_sds,
"grad_norm": grad.norm(),
"min_step": self.min_step,
"max_step": self.max_step,
}
if guidance_eval:
guidance_eval_out = self.guidance_eval(**guidance_eval_utils)
texts = []
for n, e, a, c in zip(
guidance_eval_out["noise_levels"], elevation, azimuth, camera_distances
):
texts.append(
f"n{n:.02f}\ne{e.item():.01f}\na{a.item():.01f}\nc{c.item():.02f}"
)
guidance_eval_out.update({"texts": texts})
guidance_out.update({"eval": guidance_eval_out})
return guidance_out
@torch.cuda.amp.autocast(enabled=False)
@torch.no_grad()
def get_noise_pred(
self,
latents_noisy,
t,
text_embeddings,
use_perp_neg=False,
neg_guidance_weights=None,
):
batch_size = latents_noisy.shape[0]
if use_perp_neg:
# pred noise
latent_model_input = torch.cat([latents_noisy] * 4, dim=0)
noise_pred = self.forward_unet(
latent_model_input,
torch.cat([t.reshape(1)] * 4).to(self.device),
encoder_hidden_states=text_embeddings,
) # (4B, 3, 64, 64)
noise_pred_text = noise_pred[:batch_size]
noise_pred_uncond = noise_pred[batch_size : batch_size * 2]
noise_pred_neg = noise_pred[batch_size * 2 :]
e_pos = noise_pred_text - noise_pred_uncond
accum_grad = 0
n_negative_prompts = neg_guidance_weights.shape[-1]
for i in range(n_negative_prompts):
e_i_neg = noise_pred_neg[i::n_negative_prompts] - noise_pred_uncond
accum_grad += neg_guidance_weights[:, i].view(
-1, 1, 1, 1
) * perpendicular_component(e_i_neg, e_pos)
noise_pred = noise_pred_uncond + self.cfg.guidance_scale * (
e_pos + accum_grad
)
else:
# pred noise
latent_model_input = torch.cat([latents_noisy] * 2, dim=0)
noise_pred = self.forward_unet(
latent_model_input,
torch.cat([t.reshape(1)] * 2).to(self.device),
encoder_hidden_states=text_embeddings,
)
# perform guidance (high scale from paper!)
noise_pred_text, noise_pred_uncond = noise_pred.chunk(2)
noise_pred = noise_pred_text + self.cfg.guidance_scale * (
noise_pred_text - noise_pred_uncond
)
return noise_pred
@torch.cuda.amp.autocast(enabled=False)
@torch.no_grad()
def guidance_eval(
self,
t_orig,
text_embeddings,
latents_noisy,
noise_pred,
use_perp_neg=False,
neg_guidance_weights=None,
):
# use only 50 timesteps, and find nearest of those to t
self.scheduler.set_timesteps(50)
self.scheduler.timesteps_gpu = self.scheduler.timesteps.to(self.device)
bs = (
min(self.cfg.max_items_eval, latents_noisy.shape[0])
if self.cfg.max_items_eval > 0
else latents_noisy.shape[0]
) # batch size
large_enough_idxs = self.scheduler.timesteps_gpu.expand([bs, -1]) > t_orig[
:bs
].unsqueeze(
-1
) # sized [bs,50] > [bs,1]
idxs = torch.min(large_enough_idxs, dim=1)[1]
t = self.scheduler.timesteps_gpu[idxs]
fracs = list((t / self.scheduler.config.num_train_timesteps).cpu().numpy())
imgs_noisy = self.decode_latents(latents_noisy[:bs]).permute(0, 2, 3, 1)
# get prev latent
latents_1step = []
pred_1orig = []
for b in range(bs):
step_output = self.scheduler.step(
noise_pred[b : b + 1], t[b], latents_noisy[b : b + 1], eta=1
)
latents_1step.append(step_output["prev_sample"])
pred_1orig.append(step_output["pred_original_sample"])
latents_1step = torch.cat(latents_1step)
pred_1orig = torch.cat(pred_1orig)
imgs_1step = self.decode_latents(latents_1step).permute(0, 2, 3, 1)
imgs_1orig = self.decode_latents(pred_1orig).permute(0, 2, 3, 1)
latents_final = []
for b, i in enumerate(idxs):
latents = latents_1step[b : b + 1]
text_emb = (
text_embeddings[
[b, b + len(idxs), b + 2 * len(idxs), b + 3 * len(idxs)], ...
]
if use_perp_neg
else text_embeddings[[b, b + len(idxs)], ...]
)
neg_guid = neg_guidance_weights[b : b + 1] if use_perp_neg else None
for t in tqdm(self.scheduler.timesteps[i + 1 :], leave=False):
# pred noise
noise_pred = self.get_noise_pred(
latents, t, text_emb, use_perp_neg, neg_guid
)
# get prev latent
latents = self.scheduler.step(noise_pred, t, latents, eta=1)[
"prev_sample"
]
latents_final.append(latents)
latents_final = torch.cat(latents_final)
imgs_final = self.decode_latents(latents_final).permute(0, 2, 3, 1)
return {
"bs": bs,
"noise_levels": fracs,
"imgs_noisy": imgs_noisy,
"imgs_1step": imgs_1step,
"imgs_1orig": imgs_1orig,
"imgs_final": imgs_final,
}
def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
# clip grad for stable training as demonstrated in
# Debiasing Scores and Prompts of 2D Diffusion for Robust Text-to-3D Generation
# http://arxiv.org/abs/2303.15413
if self.cfg.grad_clip is not None: | self.grad_clip_val = C(self.cfg.grad_clip, epoch, global_step) | 2 | 2023-10-23 07:40:20+00:00 | 8k |
YORG-AI/Open-Assistant | package/src/yorgassistant/core/assistant/async_threads.py | [
{
"identifier": "Assistants",
"path": "package/src/yorgassistant/core/assistant/assistant.py",
"snippet": "class Assistants():\n def __init__(self, config,yaml_path:Optional[str] = None):\n self.config = config\n YamlPathConfig.assistants_yaml_path = yaml_path if yaml_path else 'assistants.yaml'\n \n def set_assistants_yaml_path(yaml_path: str):\n # 检查 yaml_path 是否为绝对路径\n if not os.path.isabs(yaml_path):\n # 获取调用此方法的栈帧\n stack = inspect.stack()\n caller_frame = stack[1]\n # 获取调用者的文件路径\n caller_path = caller_frame.filename\n # 获取调用者的目录路径\n caller_dir = os.path.dirname(caller_path)\n # 构建 yaml 文件的绝对路径\n full_yaml_path = os.path.join(caller_dir, yaml_path)\n else:\n full_yaml_path = yaml_path\n\n # 获取 yaml 文件所在的目录\n yaml_dir = os.path.dirname(full_yaml_path)\n # 如果目录不存在,则创建它\n os.makedirs(yaml_dir, exist_ok=True)\n # 设置 assistants_yaml_path\n YamlPathConfig.assistants_yaml_path = full_yaml_path\n\n def save_to_yaml(self):\n # 构建 assistants.yaml 文件的绝对路径\n assistants_yaml_path = YamlPathConfig.assistants_yaml_path\n # 检查文件是否存在,如果不存在,则创建一个空的yaml文件\n if not os.path.exists(assistants_yaml_path):\n with open(assistants_yaml_path, 'w') as file:\n file.write('') # 创建一个空文件\n # 使用绝对路径打开 assistants.yaml 文件\n with open(assistants_yaml_path, 'r') as file:\n data = yaml.safe_load(file) or []\n # 查找具有相同 id 的 assistant\n for i, d in enumerate(data):\n if d['id'] == self.config.id:\n # 如果找到了,就更新它\n data[i] = self.config.__dict__\n break\n else:\n # 如果没有找到,就添加新的 assistant 到列表中\n data.append(self.config.__dict__)\n # 写回 YAML 文件\n with open(assistants_yaml_path, 'w') as file:\n yaml.dump(data, file)\n\n @property\n def id(self):\n return self.config.id\n\n @property\n def name(self):\n return self.config.name\n\n @name.setter\n def name(self, value):\n self.config.name = value\n self.save_to_yaml() # 更新 YAML 文件\n\n @property\n def instructions(self):\n return self.config.instructions\n\n @instructions.setter\n def instructions(self, value):\n self.config.instructions = value\n\n @property\n def description(self):\n return self.config.description\n\n @description.setter\n def description(self, value):\n self.config.description = value\n\n @property\n def tools(self):\n return self.config.tools\n\n @tools.setter\n def tools(self, value):\n self.config.tools = value\n self.save_to_yaml() # 更新 YAML 文件\n\n @property\n def model(self):\n return self.config.model\n\n @model.setter\n def model(self, value):\n self.config.model = value\n self.save_to_yaml() # 更新 YAML 文件\n\n def get_tools_type_list(self):\n return [tool['type'] for tool in self.config.tools]\n\n @staticmethod\n def create(name: str = None, instructions: str = None, tools: list[dict] = [{'type':''}], model: str = 'gpt-4', description: str = None, file_ids: list = None) -> 'Assistants':\n # 创建配置和 Assistants 对象\n config = AssistantConfig(\n id=str(uuid.uuid4()),\n created_at=int(time.time()),\n name=name,\n description=description,\n instructions=instructions,\n tools=tools,\n model=model,\n file_ids=file_ids if file_ids is not None else [],\n )\n assistant = Assistants(config,YamlPathConfig.assistants_yaml_path)\n assistant.save_to_yaml() # 保存到 YAML 文件\n return assistant\n \n @staticmethod\n def get_all_assistants() -> List[Dict[str, Any]]:\n \"\"\"\n 读取 YAML 文件并返回所有 assistants 的信息列表。\n \"\"\"\n # 确保 YAML 文件路径已经被设置\n if YamlPathConfig.assistants_yaml_path:\n if not os.path.isfile(YamlPathConfig.assistants_yaml_path):\n # 如果文件路径存在但文件不存在,则创建一个空文件\n with open(YamlPathConfig.assistants_yaml_path, 'w') as file:\n yaml.dump([], file)\n else:\n raise FileNotFoundError(\"The threads YAML file path is not set.\")\n\n # 读取 YAML 文件\n with open(YamlPathConfig.assistants_yaml_path, 'r') as file:\n assistants_data = yaml.safe_load(file) or []\n # 使用 from_dict 方法将每个字典转换为 AssistantConfig 实例\n assistants_list = []\n for item in assistants_data:\n config = AssistantConfig(**item)\n assistants_list.append(config)\n return assistants_list\n @classmethod\n def from_id(cls, id: str) -> 'Assistants':\n # 使用传入的 yaml_path 参数打开 YAML 文件\n with open(YamlPathConfig.assistants_yaml_path, 'r') as file:\n data = yaml.safe_load(file) or []\n # 查找具有相同 id 的配置\n for d in data:\n if d['id'] == id:\n # 如果找到了,就用这个配置创建一个新的 Assistants 对象\n config = AssistantConfig(**d)\n return cls(config, YamlPathConfig.assistants_yaml_path) # 使用传入的 yaml_path 创建 Assistants 实例\n # 如果没有找到,就抛出一个异常\n raise ValueError(f'No assistant with id {id} found in YAML file.')\n \n @classmethod\n def delete_by_id(cls, id: str):\n\n # 使用绝对路径打开 assistants.yaml 文件\n with open(YamlPathConfig.assistants_yaml_path, 'r') as file:\n data = yaml.safe_load(file) or []\n\n # 查找具有相同 id 的 assistant\n for i, d in enumerate(data):\n if d['id'] == id:\n # 如果找到了,就删除它\n del data[i]\n break\n else:\n # 如果没有找到,就抛出一个异常\n raise ValueError(f'No assistant with id {id} found in YAML file.')\n\n # 写回 YAML 文件\n with open(YamlPathConfig.assistants_yaml_path, 'w') as file:\n yaml.dump(data, file)"
},
{
"identifier": "OpenAINode",
"path": "package/src/yorgassistant/core/nodes/openai/openai.py",
"snippet": "class OpenAINode(BaseNode):\n config: NodeConfig = NodeConfig(**openai_node_config)\n\n history: list[dict[str, any]]\n functions: list[dict[str, any]]\n\n cur_role: Optional[str]\n cur_content: Optional[str]\n\n def __init__(self):\n super().__init__()\n\n self.history = []\n self.functions = []\n\n self.cur_role = None\n self.cur_content = None\n\n def complete(self, input: CompleteInput):\n \"\"\"\n Complete with only current history. No extra messages.\n \"\"\"\n return self._make_completion([], input)\n\n # TODO: generalize these chat functions\n def chat(self, input: ChatInput):\n \"\"\"\n Chat with OpenAI's model with simple text.\n \"\"\"\n return self._make_completion(\n [\n Message(\n role=\"user\",\n content=input.message_text,\n )\n ],\n input,\n )\n\n def chat_with_prompt_template(self, input: ChatWithPromptTemplateInput):\n \"\"\"\n Chat with OpenAI's model with a specific prompt template.\n \"\"\"\n return self._make_completion(\n [\n Message(\n role=\"user\",\n content=input.prompt_template.format(**input.params),\n )\n ],\n input,\n )\n\n def chat_with_message(self, input: ChatWithMessageInput):\n \"\"\"\n Chat with OpenAI's model with a specific message dict.\n \"\"\"\n return self._make_completion([input.message], input)\n\n def chat_with_messages(self, input: ChatWithMessagesInput):\n \"\"\"\n Chat with OpenAI's model with a specific message dict.\n \"\"\"\n return self._make_completion(input.messages, input)\n\n def use_old_openai_with_prompt(self, input: OldCompleteInput):\n return self._make_old_completion(input.prompt, input)\n\n def _make_old_completion(\n self, prompt: str, input: OldCompleteConfig\n ) -> OpenAIOldResp:\n \"\"\"\n Make a completion with the given messages.\n \"\"\"\n\n kwargs = {\"model\": input.model, \"max_tokens\": 1096}\n\n kwargs[\"prompt\"] = prompt\n # set streaming if needed\n if input.use_streaming:\n kwargs[\"stream\"] = True\n\n # TODO: add exception handling\n try:\n client = OpenAI(api_key=os.getenv(\"OPENAI_CHAT_API_KEY\"))\n response = client.completions.create(**kwargs)\n except Exception as e:\n logging.warn(f\"openai_node._make_completion: error occurred: {e}\")\n return OpenAIOldResp(\n text=f\"Error occurred: {e}\",\n finish_reason=\"error\",\n )\n\n if input.use_streaming:\n # TODO 目前不支持流式处理\n resp = OpenAIOldResp(text=\"\", finish_reason=\"\")\n for completion in response:\n resp.text += completion[\"choices\"][0][\"text\"]\n if choice.finish_reason:\n resp.finish_reason = completion[\"choices\"][0][\"finish_reason\"]\n break\n return resp\n\n resp = OpenAIOldResp(**response.choices[0].model_dump())\n return resp\n\n def _make_completion(\n self, messages: list[Message], input: ChatConfig\n ) -> OpenAIResp | OpenAIStreamingResp:\n \"\"\"\n Make a completion with the given messages.\n \"\"\"\n\n kwargs = {\n \"model\": input.model,\n }\n\n cur_messages = []\n\n # if history is empty, add a default system message\n if len(self.history) == 0:\n cur_messages.append(\n Message(\n role=\"system\",\n content=\"You are a helpful AI assistant. You should answer the user's questions and help them with their tasks.\",\n ).dict(exclude_none=True)\n )\n else:\n cur_messages += self.history\n\n # append history if needed\n if input.append_history:\n for message in messages:\n self.add_single_message(message)\n\n # add all input messages to argument `messages`\n for message in messages:\n cur_messages.append(message.dict(exclude_none=True))\n\n kwargs[\"messages\"] = tt.trim(cur_messages, input.model, max_tokens=9999)\n\n # add function definitions if exists\n if len(self.functions) > 0:\n kwargs[\"functions\"] = self.functions\n kwargs[\"function_call\"] = \"auto\"\n\n # set streaming if needed\n if input.use_streaming:\n kwargs[\"stream\"] = True\n\n # TODO: add exception handling\n try:\n client = OpenAI(api_key=os.getenv(\"OPENAI_CHAT_API_KEY\"))\n response = client.chat.completions.create(**kwargs)\n except Exception as e:\n logging.warn(f\"openai_node._make_completion: error occurred: {e}\")\n return OpenAIResp(\n message=Message(\n role=\"system\",\n content=f\"Error occurred: {e}\",\n ),\n finish_reason=\"error\",\n )\n\n if input.use_streaming:\n resp = OpenAIStreamingResp(**response.choices[0].dict())\n if input.append_history:\n self.history.append(resp.delta.dict(exclude_none=True))\n return resp\n\n resp = OpenAIResp(**response.choices[0].dict())\n if input.append_history:\n self.history.append(resp.message.dict(exclude_none=True))\n return resp\n\n def add_function(self, func_def: FunctionDefinition):\n self.functions.append(\n func_def.dict()\n ) # redefined dict() doesn't have exclude_none arg\n\n def add_single_message(self, msg: Message):\n if self.cur_role is not None and self.cur_content is not None:\n self.history.append(\n Message(\n role=self.cur_role,\n content=self.cur_content,\n ).dict(exclude_none=True)\n )\n self.cur_role = None\n self.cur_content = None\n\n self.history.append(msg.dict(exclude_none=True))\n\n def add_system_message(self, content: str):\n self.add_single_message(\n Message(\n role=\"system\",\n content=content,\n )\n )\n\n def add_role(self, role: str):\n if self.cur_role is not None and self.cur_content is not None:\n self.add_single_message(\n Message(\n role=self.cur_role,\n content=self.cur_content,\n )\n )\n\n self.cur_role = role\n\n def add_content(self, content: str):\n if self.cur_content is not None:\n self.cur_content += content\n else:\n self.cur_content = content"
},
{
"identifier": "AsyncOpenAINode",
"path": "package/src/yorgassistant/core/nodes/openai/openai.py",
"snippet": "class AsyncOpenAINode(BaseNode):\n config: NodeConfig = NodeConfig(**openai_node_config)\n\n history: list[dict[str, any]]\n functions: list[dict[str, any]]\n\n cur_role: Optional[str]\n cur_content: Optional[str]\n\n def __init__(self):\n super().__init__()\n\n self.history = []\n self.functions = []\n\n self.cur_role = None\n self.cur_content = None\n\n openai.api_key = os.getenv(\"OPENAI_CHAT_API_KEY\")\n openai.api_base = os.getenv(\"OPENAI_CHAT_API_BASE\")\n\n async def complete(self, input: CompleteInput):\n \"\"\"\n Complete with only current history. No extra messages.\n \"\"\"\n return await self._make_completion([], input)\n\n # TODO: generalize these chat functions\n async def chat(self, input: ChatInput):\n \"\"\"\n Chat with OpenAI's model with simple text.\n \"\"\"\n return await self._make_completion(\n [\n Message(\n role=\"user\",\n content=input.message_text,\n )\n ],\n input,\n )\n\n async def chat_with_prompt_template(self, input: ChatWithPromptTemplateInput):\n \"\"\"\n Chat with OpenAI's model with a specific prompt template.\n \"\"\"\n return await self._make_completion(\n [\n Message(\n role=\"user\",\n content=input.prompt_template.format(**input.params),\n )\n ],\n input,\n )\n\n async def chat_with_message(self, input: ChatWithMessageInput):\n \"\"\"\n Chat with OpenAI's model with a specific message dict.\n \"\"\"\n return await self._make_completion([input.message], input)\n\n async def chat_with_messages(self, input: ChatWithMessagesInput):\n \"\"\"\n Chat with OpenAI's model with a specific message dict.\n \"\"\"\n return await self._make_completion(input.messages, input)\n\n async def use_old_openai_with_prompt(self, input: OldCompleteInput):\n return await self._make_old_completion(input.prompt, input)\n\n async def _make_old_completion(\n self, prompt: str, input: OldCompleteConfig\n ) -> OpenAIOldResp:\n \"\"\"\n Make a completion with the given messages.\n \"\"\"\n\n kwargs = {\"model\": input.model, \"max_tokens\": 1096}\n\n kwargs[\"prompt\"] = prompt\n # set streaming if needed\n if input.use_streaming:\n kwargs[\"stream\"] = True\n\n # TODO: add exception handling\n try:\n client = OpenAI(api_key=os.getenv(\"OPENAI_CHAT_API_KEY\"))\n response = client.completions.create(**kwargs)\n except Exception as e:\n logging.warn(f\"openai_node._make_completion: error occurred: {e}\")\n return OpenAIOldResp(\n text=f\"Error occurred: {e}\",\n finish_reason=\"error\",\n )\n\n if input.use_streaming:\n # TODO 目前不支持流式处理\n resp = OpenAIOldResp(text=\"\", finish_reason=\"\")\n for completion in response:\n resp.text += completion[\"choices\"][0][\"text\"]\n if choice.finish_reason:\n resp.finish_reason = completion[\"choices\"][0][\"finish_reason\"]\n break\n return resp\n\n resp = OpenAIOldResp(**response.choices[0].model_dump())\n return resp\n\n async def _make_completion(\n self, messages: list[Message], input: ChatConfig\n ) -> OpenAIResp | OpenAIStreamingResp:\n \"\"\"\n Make a completion with the given messages.\n \"\"\"\n\n kwargs = {\n \"model\": input.model,\n }\n\n cur_messages = []\n\n # if history is empty, add a default system message\n if len(self.history) == 0:\n cur_messages.append(\n Message(\n role=\"system\",\n content=\"You are a helpful AI assistant. You should answer the user's questions and help them with their tasks.\",\n ).dict(exclude_none=True)\n )\n else:\n cur_messages += self.history\n\n # append history if needed\n if input.append_history:\n for message in messages:\n self.add_single_message(message)\n\n # add all input messages to argument `messages`\n for message in messages:\n cur_messages.append(message.dict(exclude_none=True))\n\n kwargs[\"messages\"] = tt.trim(cur_messages, input.model, max_tokens=9999)\n\n # add function definitions if exists\n if len(self.functions) > 0:\n kwargs[\"functions\"] = self.functions\n kwargs[\"function_call\"] = \"auto\"\n\n # set streaming if needed\n if input.use_streaming:\n kwargs[\"stream\"] = True\n\n # TODO: add exception handling\n try:\n client = AsyncOpenAI(api_key=os.getenv(\"OPENAI_CHAT_API_KEY\"))\n response = await client.chat.completions.create(**kwargs)\n except Exception as e:\n return OpenAIResp(\n message=Message(\n role=\"system\",\n content=f\"Error occurred: {e}\",\n ),\n finish_reason=\"error\",\n )\n\n if input.use_streaming:\n resp = OpenAIStreamingResp(**response.choices[0].dict())\n if input.append_history:\n self.history.append(resp.delta.dict(exclude_none=True))\n return resp\n\n resp = OpenAIResp(**response.choices[0].dict())\n if input.append_history:\n self.history.append(resp.message.dict(exclude_none=True))\n return resp\n\n def add_function(self, func_def: FunctionDefinition):\n self.functions.append(\n func_def.dict()\n ) # redefined dict() doesn't have exclude_none arg\n\n def add_single_message(self, msg: Message):\n if self.cur_role is not None and self.cur_content is not None:\n self.history.append(\n Message(\n role=self.cur_role,\n content=self.cur_content,\n ).dict(exclude_none=True)\n )\n self.cur_role = None\n self.cur_content = None\n\n self.history.append(msg.dict(exclude_none=True))\n\n def add_system_message(self, content: str):\n self.add_single_message(\n Message(\n role=\"system\",\n content=content,\n )\n )\n\n def add_role(self, role: str):\n if self.cur_role is not None and self.cur_content is not None:\n self.add_single_message(\n Message(\n role=self.cur_role,\n content=self.cur_content,\n )\n )\n\n self.cur_role = role\n\n def add_content(self, content: str):\n if self.cur_content is not None:\n self.cur_content += content\n else:\n self.cur_content = content"
},
{
"identifier": "Tools",
"path": "package/src/yorgassistant/core/assistant/tools/tools.py",
"snippet": "class Tools:\n tools: dict[str, Tool]\n\n def __init__(self):\n self.tools = {}\n # 获取调用此方法的栈帧\n stack = inspect.stack()\n caller_frame = stack[1]\n # 获取调用者的文件路径\n caller_path = caller_frame.filename\n # 获取调用者的目录路径\n caller_dir = os.path.dirname(caller_path)\n # 构建 openai.yaml 文件的绝对路径\n yaml_file_path = os.path.join(caller_dir, YamlPathConfig.tools_yaml_path)\n tools_yaml_path = yaml_file_path\n # 读取 tools.yaml 文件,初始化所有 tools\n with open(tools_yaml_path, \"r\") as f:\n config_obj = yaml.safe_load(f)\n for tool_name, tool_config in config_obj[\"tools\"].items():\n self.tools[tool_name] = Tool(config=ToolConfig(**tool_config))\n\n def set_tools_yaml_path(yaml_path:str):\n # 检查 yaml_path 是否为绝对路径\n if not os.path.isabs(yaml_path):\n # 获取调用此方法的栈帧\n stack = inspect.stack()\n caller_frame = stack[1]\n # 获取调用者的文件路径\n caller_path = caller_frame.filename\n # 获取调用者的目录路径\n caller_dir = os.path.dirname(caller_path)\n # 构建 yaml 文件的绝对路径\n full_yaml_path = os.path.join(caller_dir, yaml_path)\n else:\n full_yaml_path = yaml_path\n # 获取 yaml 文件所在的目录\n yaml_dir = os.path.dirname(full_yaml_path)\n # 如果目录不存在,则创建它\n os.makedirs(yaml_dir, exist_ok=True)\n # 设置 yaml_path\n YamlPathConfig.tools_yaml_path = full_yaml_path\n\n def get_tool(self, tool_name: str) -> Tool:\n # 找到对应的工具\n tool = self.tools.get(tool_name)\n if tool is None:\n raise ValueError(f\"No tool named {tool_name} found.\")\n\n return tool\n\n def get_tool_summary(self, tool_name: str) -> str:\n # 在 tools.yaml 文件中找到对应的工具\n tool = self.tools.get(tool_name)\n if tool is None:\n raise ValueError(f\"No tool named {tool_name} found.\")\n\n return tool.config.summary\n\n def get_tools_list_summary(self, tools_list: list[str]) -> dict[str, str]:\n tools_summary = {}\n for tool_name in tools_list:\n summary = self.get_tool_summary(tool_name)\n tools_summary[tool_name] = summary\n return tools_summary"
},
{
"identifier": "Tool",
"path": "package/src/yorgassistant/core/assistant/tools/tools.py",
"snippet": "class Tool:\n config: ToolConfig\n entity: BaseToolEntity\n _tool_type: str # 使用一个内部变量来存储 tool_type 的值\n\n def __init__(self, config: ToolConfig):\n self.config = config\n entity_name = config.entity_name\n\n if entity_name in FUNCTION_TOOL_ENTITIES:\n self.entity = FunctionToolEntity(FUNCTION_TOOL_ENTITIES[entity_name])\n self._tool_type = 'function'\n elif entity_name in STATEFUL_TOOL_ENTITIES:\n self.entity = STATEFUL_TOOL_ENTITIES[entity_name]()\n self._tool_type = 'stateful'\n else:\n raise Exception(f\"Tool entity {entity_name} not found.\")\n\n @property\n def tool_type(self):\n return self._tool_type\n\n @tool_type.setter\n def tool_type(self, value):\n self._tool_type = value\n # TODO: response check and type convert\n def call(self, **kwargs):\n return self.entity.call(**kwargs)\n\n def need_llm_generate_parameters(self) -> bool:\n return self.entity.need_llm_generate_parameters()\n\n def need_llm_generate_response(self) -> bool:\n return self.entity.need_llm_generate_response()\n\n def has_done(self) -> bool:\n return self.entity.current_state() == State.DONE"
}
] | import uuid
import time
import yaml
import os
import re
import logging
import json
import inspect
from typing import Any, List, Optional,Dict
from .assistant import Assistants
from ..nodes.openai.openai import OpenAINode,AsyncOpenAINode
from ..nodes.openai.openai_model import *
from .tools.tools import Tools, Tool
from .config import *
from .prompt.few_shot_cot_tools_choose_prompt import *
from .prompt.parameters_generate_prompt import *
from .prompt.response_generate_prompt import * | 6,211 |
def extract_bracket_content(s: str) -> list:
content = re.findall(r"\[(.*?)\]", s)
content = [c.replace("'", "") for c in content]
content = filter(lambda x: x != "", content)
ret = []
for item in content:
if "," in item:
ret.extend(item.split(","))
else:
ret.append(item)
return ret
class AsyncThreads:
|
def extract_bracket_content(s: str) -> list:
content = re.findall(r"\[(.*?)\]", s)
content = [c.replace("'", "") for c in content]
content = filter(lambda x: x != "", content)
ret = []
for item in content:
if "," in item:
ret.extend(item.split(","))
else:
ret.append(item)
return ret
class AsyncThreads: | current_tool: Tool | 4 | 2023-10-24 15:15:48+00:00 | 8k |
zju3dv/4K4D | scripts/preprocess/tools/align_cameras.py | [
{
"identifier": "as_torch_func",
"path": "easyvolcap/utils/data_utils.py",
"snippet": "def as_torch_func(func):\n def wrapper(*args, **kwargs):\n args = to_numpy(args)\n kwargs = to_numpy(kwargs)\n ret = func(*args, **kwargs)\n return to_tensor(ret)\n return wrapper"
},
{
"identifier": "average_c2ws",
"path": "easyvolcap/utils/cam_utils.py",
"snippet": "def average_c2ws(c2ws: np.ndarray, align_cameras: bool = True, look_at_center: bool = True) -> np.ndarray:\n \"\"\"\n Calculate the average pose, which is then used to center all poses\n using @center_poses. Its computation is as follows:\n 1. Compute the center: the average of pose centers.\n 2. Compute the z axis: the normalized average z axis.\n 3. Compute axis y': the average y axis.\n 4. Compute x' = y' cross product z, then normalize it as the x axis.\n 5. Compute the y axis: z cross product x.\n Note that at step 3, we cannot directly use y' as y axis since it's\n not necessarily orthogonal to z axis. We need to pass from x to y.\n Inputs:\n poses: (N_images, 3, 4)\n Outputs:\n pose_avg: (3, 4) the average pose\n \"\"\"\n\n if align_cameras:\n # 1. Compute the center\n center = compute_center_of_attention(c2ws)[..., 0] # (3)\n # 2. Compute the z axis\n z = -normalize(c2ws[..., 1].mean(0)) # (3) # FIXME: WHY?\n # 3. Compute axis y' (no need to normalize as it's not the final output)\n y_ = c2ws[..., 2].mean(0) # (3)\n # 4. Compute the x axis\n x = -normalize(np.cross(z, y_)) # (3)\n # 5. Compute the y axis (as z and x are normalized, y is already of norm 1)\n y = -np.cross(x, z) # (3)\n\n else:\n # 1. Compute the center\n center = c2ws[..., 3].mean(0) # (3)\n # 2. Compute the z axis\n if look_at_center:\n look = compute_center_of_attention(c2ws)[..., 0] # (3)\n z = normalize(look - center)\n else:\n z = normalize(c2ws[..., 2].mean(0)) # (3)\n # 3. Compute axis y' (no need to normalize as it's not the final output)\n y_ = c2ws[..., 1].mean(0) # (3)\n # 4. Compute the x axis\n x = -normalize(np.cross(z, y_)) # (3)\n # 5. Compute the y axis (as z and x are normalized, y is already of norm 1)\n y = -np.cross(x, z) # (3)\n\n c2w_avg = np.stack([x, y, z, center], 1) # (3, 4)\n return c2w_avg"
},
{
"identifier": "average_w2cs",
"path": "easyvolcap/utils/cam_utils.py",
"snippet": "def average_w2cs(w2cs: np.ndarray) -> np.ndarray:\n # Transform the world2camera extrinsic from matrix representation to vector representation\n rvecs = np.array([cv2.Rodrigues(w2c[:3, :3])[0] for w2c in w2cs], dtype=np.float32) # (V, 3, 1)\n tvecs = w2cs[:, :3, 3:] # (V, 3, 1)\n\n # Compute the average view direction and center in vector mode\n rvec_avg = rvecs.mean(axis=0) # (3, 1)\n tvec_avg = tvecs.mean(axis=0) # (3, 1)\n\n # Back to matrix representation\n w2c_avg = np.concatenate([cv2.Rodrigues(rvec_avg)[0], tvec_avg], axis=1)\n return w2c_avg"
},
{
"identifier": "read_camera",
"path": "easyvolcap/utils/easy_utils.py",
"snippet": "def read_camera(intri_path: str, extri_path: str = None, cam_names=[]) -> dotdict:\n if extri_path is None:\n extri_path = os.path.join(intri_path, 'extri.yml')\n intri_path = os.path.join(intri_path, 'intri.yml')\n assert os.path.exists(intri_path), intri_path\n assert os.path.exists(extri_path), extri_path\n\n intri = FileStorage(intri_path)\n extri = FileStorage(extri_path)\n cams = dotdict()\n cam_names = intri.read('names', dt='list')\n for cam in cam_names:\n # Intrinsics\n cams[cam] = dotdict()\n cams[cam].K = intri.read('K_{}'.format(cam))\n cams[cam].H = intri.read('H_{}'.format(cam), dt='real') or -1\n cams[cam].W = intri.read('W_{}'.format(cam), dt='real') or -1\n cams[cam].invK = np.linalg.inv(cams[cam]['K'])\n\n # Extrinsics\n Tvec = extri.read('T_{}'.format(cam))\n Rvec = extri.read('R_{}'.format(cam))\n if Rvec is not None: R = cv2.Rodrigues(Rvec)[0]\n else:\n R = extri.read('Rot_{}'.format(cam))\n Rvec = cv2.Rodrigues(R)[0]\n RT = np.hstack((R, Tvec))\n\n cams[cam].R = R\n cams[cam].T = Tvec\n cams[cam].C = - Rvec.T @ Tvec\n cams[cam].RT = RT\n cams[cam].Rvec = Rvec\n cams[cam].P = cams[cam].K @ cams[cam].RT\n\n # Distortion\n D = intri.read('D_{}'.format(cam))\n if D is None: D = intri.read('dist_{}'.format(cam))\n cams[cam].D = D\n\n # Time input\n cams[cam].t = extri.read('t_{}'.format(cam), dt='real') or 0 # temporal index, might all be 0\n cams[cam].v = extri.read('v_{}'.format(cam), dt='real') or 0 # temporal index, might all be 0\n\n # Bounds, could be overwritten\n cams[cam].n = extri.read('n_{}'.format(cam), dt='real') or 0.0001 # temporal index, might all be 0\n cams[cam].f = extri.read('f_{}'.format(cam), dt='real') or 1e6 # temporal index, might all be 0\n cams[cam].bounds = extri.read('bounds_{}'.format(cam))\n cams[cam].bounds = np.array([[-1e6, -1e6, -1e6], [1e6, 1e6, 1e6]]) if cams[cam].bounds is None else cams[cam].bounds\n\n # CCM\n cams[cam].ccm = intri.read('ccm_{}'.format(cam))\n cams[cam].ccm = np.eye(3) if cams[cam].ccm is None else cams[cam].ccm\n\n # # Average\n # avg_c2w_R = extri.read('avg_c2w_R')\n # avg_c2w_T = extri.read('avg_c2w_T')\n # if avg_c2w_R is not None: cams.avg_c2w_R = avg_c2w_R\n # if avg_c2w_T is not None: cams.avg_c2w_T = avg_c2w_T\n\n return dotdict(cams)"
},
{
"identifier": "write_camera",
"path": "easyvolcap/utils/easy_utils.py",
"snippet": "def write_camera(cameras: dict, path: str, intri_path: str = '', extri_path: str = ''):\n from os.path import join\n os.makedirs(path, exist_ok=True)\n if not intri_path or not extri_path:\n intri_name = join(path, 'intri.yml') # TODO: make them arguments\n extri_name = join(path, 'extri.yml')\n intri = FileStorage(intri_name, True)\n extri = FileStorage(extri_name, True)\n cam_names = [key_.split('.')[0] for key_ in cameras.keys()]\n intri.write('names', cam_names, 'list')\n extri.write('names', cam_names, 'list')\n\n cameras = dotdict(cameras)\n for key_, val in cameras.items():\n # Skip special keys\n if key_ == 'basenames': continue\n # if key_ == 'avg_R': continue\n # if key_ == 'avg_T': continue\n\n key = key_.split('.')[0]\n # Intrinsics\n intri.write('K_{}'.format(key), val.K)\n if 'H' in val: intri.write('H_{}'.format(key), val.H, 'real')\n if 'W' in val: intri.write('W_{}'.format(key), val.W, 'real')\n\n # Distortion\n if 'D' not in val:\n if 'dist' in val: val.D = val.dist\n else: val.D = np.zeros((5, 1))\n intri.write('D_{}'.format(key), val.D.reshape(5, 1))\n\n # Extrinsics\n if 'R' not in val: val.R = cv2.Rodrigues(val.Rvec)[0]\n if 'Rvec' not in val: val.Rvec = cv2.Rodrigues(val.R)[0]\n extri.write('R_{}'.format(key), val.Rvec)\n extri.write('Rot_{}'.format(key), val.R)\n extri.write('T_{}'.format(key), val.T.reshape(3, 1))\n\n # Temporal\n if 't' in val: extri.write('t_{}'.format(key), val.t, 'real')\n\n # Bounds\n if 'n' in val: extri.write('n_{}'.format(key), val.n, 'real')\n if 'f' in val: extri.write('f_{}'.format(key), val.f, 'real')\n if 'bounds' in val: extri.write('bounds_{}'.format(key), val.bounds)\n\n # Color correction matrix\n if 'ccm' in val: intri.write('ccm_{}'.format(key), val.ccm)\n\n # # Averaged camera matrix (optional)\n # if 'c2w_avg' in cameras:\n # cameras.avg_R = cameras.c2w_avg[:3, :3]\n # cameras.avg_T = cameras.c2w_avg[:3, 3:]\n # if 'avg_R' in cameras and 'avg_T' in cameras:\n # extri.write('avg_R'.format(key), cameras.avg_R)\n # extri.write('avg_T'.format(key), cameras.avg_T.reshape(3, 1))"
},
{
"identifier": "to_easymocap",
"path": "easyvolcap/utils/easy_utils.py",
"snippet": "def to_easymocap(Ks: torch.Tensor, Hs: torch.Tensor, Ws: torch.Tensor,\n Rs: torch.Tensor, Ts: torch.Tensor, ts: torch.Tensor,\n ns: torch.Tensor, fs: torch.Tensor, Ds: torch.Tensor = None,\n cam_digit: int = 5):\n # Number of render views\n n_render_views = Ks.shape[0]\n\n # Convert interpolated render path to easymocap format\n cameras = dotdict()\n for i in range(n_render_views):\n cam = dotdict()\n cam.K, cam.H, cam.W = Ks[i, 0], Hs[i, 0], Ws[i, 0]\n cam.R, cam.T = Rs[i, 0], Ts[i, 0]\n cam.t = ts[i, 0] if len(ts.shape) > 1 else ts[i]\n cam.n, cam.f = ns[i, 0], fs[i, 0]\n cam.D = Ds[i, 0] if Ds is not None else np.zeros((5, 1))\n cameras[f'{i:0{cam_digit}d}'] = to_numpy(cam)\n\n # Return the easymocap format cameras\n return cameras"
},
{
"identifier": "affine_inverse",
"path": "easyvolcap/utils/net_utils.py",
"snippet": "@torch.jit.script\ndef affine_inverse(A: torch.Tensor):\n R = A[..., :3, :3] # ..., 3, 3\n T = A[..., :3, 3:] # ..., 3, 1\n P = A[..., 3:, :] # ..., 1, 4\n return torch.cat([torch.cat([R.mT, -R.mT @ T], dim=-1), P], dim=-2)"
},
{
"identifier": "monotonic_near_far",
"path": "easyvolcap/utils/net_utils.py",
"snippet": "@torch.jit.script\ndef monotonic_near_far(near: torch.Tensor, far: torch.Tensor, n: torch.Tensor, f: torch.Tensor):\n n = n[..., None, None]\n f = f[..., None, None]\n near, far = near.clip(n, f), far.clip(n, f)\n valid_mask = near < far\n valid_near_plane = torch.where(valid_mask, near, f).min()\n valid_far_plane = torch.where(valid_mask, far, n).max()\n near, far = torch.where(valid_mask, near, valid_near_plane), torch.where(valid_mask, far, valid_far_plane) # what ever for these points\n near, far = near.clip(n, f), far.clip(n, f)\n return near, far"
},
{
"identifier": "affine_padding",
"path": "easyvolcap/utils/net_utils.py",
"snippet": "@torch.jit.script\ndef affine_padding(c2w: torch.Tensor):\n sh = c2w.shape\n pad0 = c2w.new_zeros(sh[:-2] + (1, 3)) # B, 1, 3\n pad1 = c2w.new_ones(sh[:-2] + (1, 1)) # B, 1, 1\n pad = torch.cat([pad0, pad1], dim=-1) # B, 1, 4\n ext = torch.cat([c2w, pad], dim=-2) # B, 4, 4\n return ext"
}
] | import torch
import argparse
import numpy as np
from os.path import join
from easyvolcap.utils.console_utils import *
from easyvolcap.utils.data_utils import as_torch_func
from easyvolcap.utils.cam_utils import average_c2ws, average_w2cs
from easyvolcap.utils.easy_utils import read_camera, write_camera, to_easymocap
from easyvolcap.utils.net_utils import affine_inverse, monotonic_near_far, affine_padding | 5,186 | # This script is used to perform camera alignment for a given `easyvolcap` format dataset.
# Namely, it does the same things as in `VolumetricVideoDataset.align_points()`, this script
# is just a standalone version of that function for you to export the aligned cameras.
def load_align_cameras(data_root: str, intri_file: str, extri_file: str, camera_dir: str = 'cameras',
n_frame_total: int = 1, near: float = 0.2, far: float = 100.0,
avg_using_all: bool = False, avg_max_count: int = 100):
# Multiview dataset loading, need to expand, will have redundant information
if exists(join(data_root, intri_file)) and exists(join(data_root, extri_file)):
cameras = read_camera(join(data_root, intri_file), join(data_root, extri_file))
camera_names = np.asarray(sorted(list(cameras.keys()))) # NOTE: sorting camera names
cameras = dotdict({k: [cameras[k] for i in range(n_frame_total)] for k in camera_names})
# Monocular dataset loading, each camera has a separate folder
elif exists(join(data_root, camera_dir)):
camera_names = np.asarray(sorted(os.listdir(join(data_root, camera_dir)))) # NOTE: sorting here is very important!
cameras = dotdict({
k: [v[1] for v in sorted(
read_camera(join(data_root, camera_dir, k, intri_file),
join(data_root, camera_dir, k, extri_file)).items()
)] for k in camera_names
})
# Whatever else, for now, raise error
else: raise NotImplementedError(f'Could not find [intri.yml, extri.yml] or [cameras] folder in {data_root}, check your dataset configuration')
# cameras: a mapping from camera names to a list of camera objects, (every element in list is an actual camera for that particular view and frame)
Hs = torch.as_tensor([[cam.H for cam in cameras[k]] for k in camera_names], dtype=torch.float) # (V, F)
Ws = torch.as_tensor([[cam.W for cam in cameras[k]] for k in camera_names], dtype=torch.float) # (V, F)
Ks = torch.as_tensor([[cam.K for cam in cameras[k]] for k in camera_names], dtype=torch.float) # (V, F, 3, 3)
Rs = torch.as_tensor([[cam.R for cam in cameras[k]] for k in camera_names], dtype=torch.float) # (V, F, 3, 3)
Ts = torch.as_tensor([[cam.T for cam in cameras[k]] for k in camera_names], dtype=torch.float) # (V, F, 3, 1)
Ds = torch.as_tensor([[cam.D for cam in cameras[k]] for k in camera_names], dtype=torch.float) # (V, F, 1, 5)
ts = torch.as_tensor([[cam.t for cam in cameras[k]] for k in camera_names], dtype=torch.float) # (V, F) # UNUSED: time index from camera, not used for now
ns = torch.as_tensor([[cam.n for cam in cameras[k]] for k in camera_names], dtype=torch.float) # (V, F)
fs = torch.as_tensor([[cam.f for cam in cameras[k]] for k in camera_names], dtype=torch.float) # (V, F)
w2cs = torch.cat([Rs, Ts], dim=-1) # (V, F, 3, 4)
c2ws = affine_inverse(w2cs) # (V, F, 3, 4)
ns, fs = monotonic_near_far(ns, fs, torch.as_tensor(near, dtype=torch.float), torch.as_tensor(far, dtype=torch.float))
# Move cameras to the center of the frame (!: intrusive)
c2ws, w2cs, Rs, Ts, c2w_avg = align_points(c2ws, avg_using_all, avg_max_count)
# Return the aligned cameras
return Ks, Hs, Ws, Rs, Ts, ts, ns, fs, Ds
def align_points(c2ws: torch.Tensor, avg_using_all: bool = False, avg_max_count: int = 100):
sh = c2ws.shape # (V, F, 3, 4)
c2ws = c2ws.view((-1,) + sh[-2:]) # (V*F, 3, 4)
if avg_using_all:
stride = max(len(c2ws) // avg_max_count, 1)
inds = torch.arange(len(c2ws))[::stride][:avg_max_count]
c2w_avg = as_torch_func(average_c2ws)(c2ws[inds]) # (V*F, 3, 4), # !: HEAVY
else:
c2w_avg = as_torch_func(average_c2ws)(c2ws.view(sh)[:, 0]) # (V, 3, 4)
c2w_avg = c2w_avg
c2ws = (affine_inverse(affine_padding(c2w_avg))[None] @ affine_padding(c2ws))[..., :3, :] # (1, 4, 4) @ (V*F, 4, 4) -> (V*F, 3, 4)
w2cs = affine_inverse(c2ws) # (V*F, 3, 4)
c2ws = c2ws.view(sh)
w2cs = w2cs.view(sh)
Rs = w2cs[..., :-1]
Ts = w2cs[..., -1:]
return c2ws, w2cs, Rs, Ts, c2w_avg
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data_root', type=str, default='data/webcam/simple/light/calib_gather_230928/colmap/align/static/images')
parser.add_argument('--intri_file', type=str, default='intri.yml')
parser.add_argument('--extri_file', type=str, default='extri.yml')
parser.add_argument('--camera_dir', type=str, default='cameras')
parser.add_argument('--n_frame_total', type=int, default=1)
parser.add_argument('--near', type=float, default=0.25)
parser.add_argument('--far', type=float, default=2.00)
parser.add_argument('--avg_using_all', action='store_true')
parser.add_argument('--avg_max_count', type=int, default=100)
parser.add_argument('--cam_digit', type=int, default=1)
parser.add_argument('--save_root', type=str, default='data/webcam/simple/light/calib_gather_230928/aligned')
args = parser.parse_args()
# Load and align cameras
Ks, Hs, Ws, Rs, Ts, ts, ns, fs, Ds = load_align_cameras(
args.data_root, args.intri_file, args.extri_file, args.camera_dir,
args.n_frame_total, args.near, args.far, args.avg_using_all, args.avg_max_count
)
# Convert loaded and aligned cameras to `EasyMocap` format
# TODO: support for monocular cameras
| # This script is used to perform camera alignment for a given `easyvolcap` format dataset.
# Namely, it does the same things as in `VolumetricVideoDataset.align_points()`, this script
# is just a standalone version of that function for you to export the aligned cameras.
def load_align_cameras(data_root: str, intri_file: str, extri_file: str, camera_dir: str = 'cameras',
n_frame_total: int = 1, near: float = 0.2, far: float = 100.0,
avg_using_all: bool = False, avg_max_count: int = 100):
# Multiview dataset loading, need to expand, will have redundant information
if exists(join(data_root, intri_file)) and exists(join(data_root, extri_file)):
cameras = read_camera(join(data_root, intri_file), join(data_root, extri_file))
camera_names = np.asarray(sorted(list(cameras.keys()))) # NOTE: sorting camera names
cameras = dotdict({k: [cameras[k] for i in range(n_frame_total)] for k in camera_names})
# Monocular dataset loading, each camera has a separate folder
elif exists(join(data_root, camera_dir)):
camera_names = np.asarray(sorted(os.listdir(join(data_root, camera_dir)))) # NOTE: sorting here is very important!
cameras = dotdict({
k: [v[1] for v in sorted(
read_camera(join(data_root, camera_dir, k, intri_file),
join(data_root, camera_dir, k, extri_file)).items()
)] for k in camera_names
})
# Whatever else, for now, raise error
else: raise NotImplementedError(f'Could not find [intri.yml, extri.yml] or [cameras] folder in {data_root}, check your dataset configuration')
# cameras: a mapping from camera names to a list of camera objects, (every element in list is an actual camera for that particular view and frame)
Hs = torch.as_tensor([[cam.H for cam in cameras[k]] for k in camera_names], dtype=torch.float) # (V, F)
Ws = torch.as_tensor([[cam.W for cam in cameras[k]] for k in camera_names], dtype=torch.float) # (V, F)
Ks = torch.as_tensor([[cam.K for cam in cameras[k]] for k in camera_names], dtype=torch.float) # (V, F, 3, 3)
Rs = torch.as_tensor([[cam.R for cam in cameras[k]] for k in camera_names], dtype=torch.float) # (V, F, 3, 3)
Ts = torch.as_tensor([[cam.T for cam in cameras[k]] for k in camera_names], dtype=torch.float) # (V, F, 3, 1)
Ds = torch.as_tensor([[cam.D for cam in cameras[k]] for k in camera_names], dtype=torch.float) # (V, F, 1, 5)
ts = torch.as_tensor([[cam.t for cam in cameras[k]] for k in camera_names], dtype=torch.float) # (V, F) # UNUSED: time index from camera, not used for now
ns = torch.as_tensor([[cam.n for cam in cameras[k]] for k in camera_names], dtype=torch.float) # (V, F)
fs = torch.as_tensor([[cam.f for cam in cameras[k]] for k in camera_names], dtype=torch.float) # (V, F)
w2cs = torch.cat([Rs, Ts], dim=-1) # (V, F, 3, 4)
c2ws = affine_inverse(w2cs) # (V, F, 3, 4)
ns, fs = monotonic_near_far(ns, fs, torch.as_tensor(near, dtype=torch.float), torch.as_tensor(far, dtype=torch.float))
# Move cameras to the center of the frame (!: intrusive)
c2ws, w2cs, Rs, Ts, c2w_avg = align_points(c2ws, avg_using_all, avg_max_count)
# Return the aligned cameras
return Ks, Hs, Ws, Rs, Ts, ts, ns, fs, Ds
def align_points(c2ws: torch.Tensor, avg_using_all: bool = False, avg_max_count: int = 100):
sh = c2ws.shape # (V, F, 3, 4)
c2ws = c2ws.view((-1,) + sh[-2:]) # (V*F, 3, 4)
if avg_using_all:
stride = max(len(c2ws) // avg_max_count, 1)
inds = torch.arange(len(c2ws))[::stride][:avg_max_count]
c2w_avg = as_torch_func(average_c2ws)(c2ws[inds]) # (V*F, 3, 4), # !: HEAVY
else:
c2w_avg = as_torch_func(average_c2ws)(c2ws.view(sh)[:, 0]) # (V, 3, 4)
c2w_avg = c2w_avg
c2ws = (affine_inverse(affine_padding(c2w_avg))[None] @ affine_padding(c2ws))[..., :3, :] # (1, 4, 4) @ (V*F, 4, 4) -> (V*F, 3, 4)
w2cs = affine_inverse(c2ws) # (V*F, 3, 4)
c2ws = c2ws.view(sh)
w2cs = w2cs.view(sh)
Rs = w2cs[..., :-1]
Ts = w2cs[..., -1:]
return c2ws, w2cs, Rs, Ts, c2w_avg
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data_root', type=str, default='data/webcam/simple/light/calib_gather_230928/colmap/align/static/images')
parser.add_argument('--intri_file', type=str, default='intri.yml')
parser.add_argument('--extri_file', type=str, default='extri.yml')
parser.add_argument('--camera_dir', type=str, default='cameras')
parser.add_argument('--n_frame_total', type=int, default=1)
parser.add_argument('--near', type=float, default=0.25)
parser.add_argument('--far', type=float, default=2.00)
parser.add_argument('--avg_using_all', action='store_true')
parser.add_argument('--avg_max_count', type=int, default=100)
parser.add_argument('--cam_digit', type=int, default=1)
parser.add_argument('--save_root', type=str, default='data/webcam/simple/light/calib_gather_230928/aligned')
args = parser.parse_args()
# Load and align cameras
Ks, Hs, Ws, Rs, Ts, ts, ns, fs, Ds = load_align_cameras(
args.data_root, args.intri_file, args.extri_file, args.camera_dir,
args.n_frame_total, args.near, args.far, args.avg_using_all, args.avg_max_count
)
# Convert loaded and aligned cameras to `EasyMocap` format
# TODO: support for monocular cameras | cameras = to_easymocap(Ks, Hs, Ws, Rs, Ts, ts, ns, fs, Ds, cam_digit=args.cam_digit) | 5 | 2023-10-17 04:48:46+00:00 | 8k |
chengzeyi/stable-fast | src/sfast/triton/torch_ops.py | [
{
"identifier": "copy",
"path": "src/sfast/triton/ops/copy.py",
"snippet": "def copy(dst, src):\n dst_device = dst.device\n src_device = src.device\n assert dst_device.type == 'cuda'\n assert dst_device == src_device\n dst_shape = dst.shape\n src_shape = src.shape\n assert dst_shape == src_shape\n\n dst_strides = dst.stride()\n src_strides = src.stride()\n\n ndim = dst.ndim\n if ndim in (1, 2):\n if dst.ndim == 1:\n dst = dst[None, :]\n src = src[None, :]\n\n bsz, sz0 = dst_shape\n bsd, sd0 = dst_strides\n bss, ss0 = src_strides\n\n def grid(meta):\n return (triton.cdiv(sz0, meta['BLOCK_M']), bsz)\n\n copy_2d_kernel[grid](\n dst,\n src,\n bsz,\n sz0,\n bss,\n ss0,\n bsd,\n sd0,\n )\n elif ndim == 3:\n bs, sz0, sz1 = dst_shape\n bsd, sd0, sd1 = dst_strides\n bss, ss0, ss1 = src_strides\n\n def grid(meta):\n return (triton.cdiv(sz0, meta['BLOCK_M']) *\n triton.cdiv(sz1, meta['BLOCK_N']), bs)\n\n copy_3d_kernel[grid](\n dst,\n src,\n bs,\n sz0,\n sz1,\n bss,\n ss0,\n ss1,\n bsd,\n sd0,\n sd1,\n )\n elif ndim == 4:\n bs, sz0, sz1, sz2 = dst_shape\n bsd, sd0, sd1, sd2 = dst_strides\n bss, ss0, ss1, ss2 = src_strides\n\n def grid(meta):\n return (triton.cdiv(sz0, meta['BLOCK_M']) *\n triton.cdiv(sz1, meta['BLOCK_N']) *\n triton.cdiv(sz2, meta['BLOCK_K']), bs)\n\n copy_4d_kernel[grid](\n dst,\n src,\n bs,\n sz0,\n sz1,\n sz2,\n bss,\n ss0,\n ss1,\n ss2,\n bsd,\n sd0,\n sd1,\n sd2,\n )\n else:\n raise NotImplementedError\n\n return dst"
},
{
"identifier": "group_norm_forward",
"path": "src/sfast/triton/ops/group_norm.py",
"snippet": "def group_norm_4d_forward_kernel(\n input_ptr,\n gamma_ptr,\n beta_ptr,\n N,\n C,\n HxW,\n groups,\n eps,\n output_ptr,\n mean_ptr,\n rstd_ptr,\n C_G,\n GROUP_SIZE,\n BLOCK_SIZE: tl.constexpr,\n):\ndef create_group_norm_4d_forward_kernel(act=activation.identity):\ndef group_norm_4d_channels_last_forward_collect_stats_kernel(\n input_ptr,\n N,\n C,\n HxW,\n groups,\n eps,\n mean_ptr,\n rstd_ptr,\n C_G,\n ROW_SIZE: tl.constexpr,\n BLOCK_SIZE: tl.constexpr,\n):\ndef group_norm_4d_channels_last_forward_collect_stats_kernel_stage_1(\n input_ptr,\n N,\n C,\n HxW,\n groups,\n cluster_size,\n cluster_num,\n cluster_mean_ptr,\n cluster_m2_ptr,\n cluster_weight_ptr,\n C_G,\n ROW_SIZE: tl.constexpr,\n BLOCK_SIZE: tl.constexpr,\n):\ndef group_norm_4d_channels_last_forward_collect_stats_kernel_stage_2(\n cluster_mean_ptr,\n cluster_m2_ptr,\n cluster_weight_ptr,\n N,\n groups,\n cluster_num,\n eps,\n mean_ptr,\n rstd_ptr,\n BLOCK_SIZE: tl.constexpr,\n):\ndef group_norm_4d_channels_last_forward_apply_kernel(\n input_ptr,\n gamma_ptr,\n beta_ptr,\n mean_ptr,\n rstd_ptr,\n N,\n C,\n HxW,\n groups,\n eps,\n output_ptr,\n C_G,\n ROW_SIZE: tl.constexpr,\n BLOCK_SIZE: tl.constexpr,\n):\ndef create_group_norm_4d_channels_last_forward_apply_kernel(\n act=activation.identity):\ndef create_group_norm_forward(act=activation.identity):\n def group_norm_forward(input,\n num_groups,\n weight=None,\n bias=None,\n eps=1e-05,\n output_mean=True,\n output_rstd=True):\n def grid(meta):\n def grid(meta):\n def grid(meta):\n def test_group_norm():\n X = input_ptr + offset\n Y = output_ptr + offset\n X = input_ptr + offset\n X = input_ptr + offset\n X = input_ptr + offset\n Y = output_ptr + offset\n N, C, H, W = shape"
},
{
"identifier": "LayerNorm",
"path": "src/sfast/triton/ops/layer_norm.py",
"snippet": "class LayerNorm(torch.autograd.Function):\n\n @staticmethod\n def forward(ctx, x, normalized_shape, weight, bias, eps):\n x = x.contiguous()\n weight = weight.contiguous() if weight is not None else None\n bias = bias.contiguous() if bias is not None else None\n # allocate output\n y = torch.empty_like(x)\n\n N = functools.reduce(operator.mul, normalized_shape, 1)\n # reshape input data into 2D tensor\n x_arg = x.reshape(-1, N)\n M, N = x_arg.shape\n needs_backward = any(x is not None and x.requires_grad\n for x in [x, weight, bias])\n if needs_backward:\n mean = torch.empty((M, ), dtype=x.dtype, device=x.device)\n rstd = torch.empty((M, ), dtype=x.dtype, device=x.device)\n else:\n mean, rstd = None, None\n # Less than 64KB per feature: enqueue fused kernel\n MAX_FUSED_SIZE = 65536 // x.element_size()\n BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))\n if N > BLOCK_SIZE:\n raise RuntimeError(\n \"This layer norm doesn't support feature dim >= 64KB.\")\n # heuristics for number of warps\n num_warps = min(max(BLOCK_SIZE // 256, 1), 16)\n # enqueue kernel\n _layer_norm_fwd_fused[(M, )]( #\n x_arg,\n y,\n weight,\n bias,\n mean,\n rstd, #\n x_arg.stride(0),\n N,\n eps, #\n BLOCK_SIZE=BLOCK_SIZE,\n num_warps=num_warps,\n # num_ctas=1,\n )\n ctx.save_for_backward(x, weight, bias, mean, rstd)\n ctx.BLOCK_SIZE = BLOCK_SIZE\n ctx.num_warps = num_warps\n ctx.eps = eps\n ctx.normalized_shape = normalized_shape\n return y\n\n @staticmethod\n def backward(ctx, dy):\n dy.contiguous()\n x, w, b, m, v = ctx.saved_tensors\n x = x.contiguous()\n w = w.contiguous() if w is not None else None\n b = b.contiguous() if b is not None else None\n m = m.contiguous()\n v = v.contiguous()\n\n grad_input_mask = (ctx.needs_input_grad[0], ctx.needs_input_grad[2],\n ctx.needs_input_grad[3])\n grad_inputs = aten.native_layer_norm_backward(dy, x,\n ctx.normalized_shape, m,\n v, w, b, grad_input_mask)\n dx, dw, db = grad_inputs\n return dx, None, dw, db, None\n\n M = m.numel()\n N = x.numel() // M\n # heuristics for amount of parallel reduction stream for DW/DB\n # N = w.shape[0]\n GROUP_SIZE_M = 64\n if N <= 8192:\n GROUP_SIZE_M = 96\n if N <= 4096:\n GROUP_SIZE_M = 128\n if N <= 1024:\n GROUP_SIZE_M = 256\n # allocate output\n locks = torch.zeros(2 * GROUP_SIZE_M, dtype=torch.int32, device='cuda')\n _dw = torch.empty((GROUP_SIZE_M, w.shape[0]),\n dtype=x.dtype,\n device=w.device)\n _db = torch.empty((GROUP_SIZE_M, w.shape[0]),\n dtype=x.dtype,\n device=w.device)\n dw = torch.empty((w.shape[0], ), dtype=w.dtype, device=w.device)\n db = torch.empty((w.shape[0], ), dtype=w.dtype, device=w.device)\n dx = torch.empty_like(dy)\n # enqueue kernel using forward pass heuristics\n # also compute partial sums for DW and DB\n x_arg = x.reshape(-1, x.shape[-1])\n M, N = x_arg.shape\n _layer_norm_bwd_dx_fused[(M, )]( #\n dx,\n dy,\n _dw,\n _db,\n x,\n w,\n b,\n m,\n v,\n locks, #\n x_arg.stride(0),\n N,\n ctx.eps, #\n BLOCK_SIZE_N=ctx.BLOCK_SIZE, #\n GROUP_SIZE_M=GROUP_SIZE_M, #\n num_warps=ctx.num_warps)\n\n def grid(meta):\n return [triton.cdiv(N, meta['BLOCK_SIZE_N'])]\n\n # accumulate partial sums in separate kernel\n _layer_norm_bwd_dwdb[grid](\n _dw,\n _db,\n dw,\n db,\n GROUP_SIZE_M,\n N, #\n BLOCK_SIZE_M=32, #\n BLOCK_SIZE_N=128,\n # num_ctas=1,\n )\n return dx, None, dw, db, None"
},
{
"identifier": "conv_forward",
"path": "src/sfast/triton/ops/conv.py",
"snippet": "def conv_heuristics():\ndef _unpack(idx, order, shape):\ndef estimate_conv_time(\n # backend, device,\n num_warps,\n num_stages,\n x,\n BATCH,\n IN_C,\n IN_H,\n IN_W,\n KERNEL_N,\n KERNEL_H,\n KERNEL_W,\n OUT_H,\n OUT_W,\n BLOCK_M,\n BLOCK_K,\n BLOCK_N,\n debug=False,\n **kwargs,\n):\ndef early_config_prune(configs, named_args):\ndef _kernel_delta_x_hwc(\n x,\n w,\n bias,\n y,\n # stride of tensor\n stride_xn,\n stride_xc,\n stride_xh,\n stride_xw,\n stride_wn,\n stride_wc,\n stride_wh,\n stride_ww,\n stride_yn,\n stride_yc,\n stride_yh,\n stride_yw,\n # pointer inc for x\n delta_xh_ptr,\n delta_xw_ptr,\n delta_xc_ptr,\n # Tensor dimensions\n BATCH,\n IN_C,\n IN_H,\n IN_W,\n KERNEL_N,\n KERNEL_H,\n KERNEL_W,\n OUT_H,\n OUT_W,\n # parameters of conv\n stride_h,\n stride_w,\n padding_h,\n padding_w,\n dilation_h,\n dilation_w,\n output_padding_h,\n output_padding_w,\n groups,\n # Metaparameters\n ACC_TYPE: tl.constexpr,\n CONV1X1_NHWC: tl.constexpr,\n # blocks in different dimension\n BLOCK_M: tl.constexpr,\n BLOCK_N: tl.constexpr,\n # reduction tiling parameter for matmul\n BLOCK_K: tl.constexpr,\n # Super-blocking for better L2 peformance\n GROUP_H: tl.constexpr,\n WITH_BIAS: tl.constexpr,\n):\ndef _kernel_delta_x(\n x,\n w,\n bias,\n y,\n # stride of tensor\n stride_xn,\n stride_xc,\n stride_xh,\n stride_xw,\n stride_wn,\n stride_wc,\n stride_wh,\n stride_ww,\n stride_yn,\n stride_yc,\n stride_yh,\n stride_yw,\n # pointer inc for x\n delta_x_ptr,\n # Tensor dimensions\n BATCH,\n IN_C,\n IN_H,\n IN_W,\n KERNEL_N,\n KERNEL_H,\n KERNEL_W,\n OUT_H,\n OUT_W,\n # parameters of conv\n stride_h,\n stride_w,\n padding_h,\n padding_w,\n dilation_h,\n dilation_w,\n output_padding_h,\n output_padding_w,\n groups,\n # Metaparameters\n ACC_TYPE: tl.constexpr,\n CONV1X1_NHWC: tl.constexpr,\n # blocks in different dimension\n BLOCK_M: tl.constexpr,\n BLOCK_N: tl.constexpr,\n # reduction tiling parameter for matmul\n BLOCK_K: tl.constexpr,\n # Super-blocking for better L2 peformance\n GROUP_H: tl.constexpr,\n WITH_BIAS: tl.constexpr,\n):\n def _delta_x_ptr_hwc(\n IN_C,\n KERNEL_H,\n KERNEL_W,\n dilation_h,\n dilation_w,\n stride_wc,\n stride_wh,\n stride_ww,\n stride_xc,\n stride_xh,\n stride_xw,\n device,\n ):\n def _delta_x_ptr(\n IN_C,\n KERNEL_H,\n KERNEL_W,\n dilation_h,\n dilation_w,\n stride_wc,\n stride_wh,\n stride_ww,\n stride_xc,\n stride_xh,\n stride_xw,\n device,\n ):\n def _call(\n x,\n w,\n bias,\n stride,\n padding,\n dilation,\n transposed,\n output_padding,\n groups,\n ):\n def grid(META):\n def forward(\n x,\n w,\n bias,\n stride=(1, 1),\n padding=(0, 0),\n dilation=(1, 1),\n transposed=False,\n output_padding=(0, 0),\n groups=1,\n ):\n M = BATCH * OUT_H * OUT_W\n N = KERNEL_N\n K = KERNEL_H * KERNEL_W * IN_C\n M, N = max(M, BLOCK_M), max(N, BLOCK_N)\n CRS = IN_C * KERNEL_H * KERNEL_W\n CRS = IN_C * KERNEL_H * KERNEL_W\n BATCH = shape_x[xn]\n IN_C = shape_x[xc]\n IN_H = shape_x[xh]\n IN_W = shape_x[xw]\n KERNEL_N = shape_w[wn]\n KERNEL_H = shape_w[wh]\n KERNEL_W = shape_w[ww]\n OUT_H = shape_y[yh]\n OUT_W = shape_y[yw]\n ACC_TYPE = tl.float32\n ACC_TYPE = tl.float16\n ACC_TYPE = tl.float64\n ACC_TYPE = tl.int32\n CONV1X1_NHWC = False\n CONV1X1_NHWC = True\n DELTA_X_PTR_HWC = (False if\n ((padding[0] == 0 and padding[1] == 0) or\n (KERNEL_H == 1 and KERNEL_W == 1)) else True)\nclass _conv:"
}
] | import torch
import sfast
from sfast.utils.custom_python_operator import register_custom_python_operator
from .ops.copy import copy
from .ops.group_norm import (group_norm_forward, group_norm_silu_forward)
from .ops.layer_norm import LayerNorm as TritonLayerNorm
from .ops.conv import conv_forward | 4,803 | aten = torch.ops.aten
def construct_triton_contiguous_torch_op():
class TritonContiguous(torch.autograd.Function):
@staticmethod
def forward(ctx, x, memory_format=torch.contiguous_format):
if x.device.type != 'cuda' or x.ndim > 4 or x.is_contiguous(
memory_format=memory_format):
return aten.contiguous(x, memory_format=memory_format)
else:
dst = torch.empty_like(x, memory_format=memory_format)
return copy(dst, x)
@staticmethod
def backward(ctx, grad_output):
return grad_output, None
def contiguous(x, memory_format=torch.contiguous_format):
return TritonContiguous.apply(x, memory_format)
return contiguous
contiguous = construct_triton_contiguous_torch_op()
register_custom_python_operator(
'sfast_triton::contiguous(Tensor a, MemoryFormat memory_format) -> Tensor',
contiguous)
def constuct_triton_clone_torch_op():
class TritonClone(torch.autograd.Function):
@staticmethod
def forward(ctx, x, memory_format=torch.preserve_format):
if x.device.type != 'cuda' or x.ndim > 4 or x.is_contiguous(
memory_format=memory_format):
return aten.clone(x, memory_format=memory_format)
else:
dst = torch.empty_like(x, memory_format=memory_format)
return copy(dst, x)
@staticmethod
def backward(ctx, grad_output):
return grad_output, None
def clone(x, memory_format=torch.preserve_format):
return TritonClone.apply(x, memory_format)
return clone
clone = constuct_triton_clone_torch_op()
register_custom_python_operator(
'sfast_triton::clone(Tensor a, MemoryFormat memory_format) -> Tensor',
clone)
def construct_triton_reshape_torch_op():
class TritonReshape(torch.autograd.Function):
@staticmethod
def forward(ctx, x, shape):
ctx.shape = x.shape
if x.device.type != 'cuda' or x.ndim > 4 or sfast._C._compute_stride(
x.shape, x.stride(), shape) is not None:
return aten.reshape(x, shape)
else:
dst = torch.empty_like(x,
memory_format=torch.contiguous_format)
copy(dst, x)
return aten.reshape(dst, shape)
@staticmethod
def backward(ctx, grad_output):
if grad_output.device.type != 'cuda' or grad_output.ndim > 4 or sfast._C._compute_stride(
grad_output.shape, grad_output.stride(),
ctx.shape) is not None:
return grad_output.reshape(ctx.shape), None
else:
dst = torch.empty_like(grad_output,
memory_format=torch.contiguous_format)
copy(dst, grad_output)
return dst.reshape(ctx.shape), None
def reshape(x, shape):
return TritonReshape.apply(x, shape)
return reshape
reshape = construct_triton_reshape_torch_op()
register_custom_python_operator(
'sfast_triton::reshape(Tensor a, int[] shape) -> Tensor', reshape)
def construct_triton_group_norm_torch_op():
class TritonGroupNorm(torch.autograd.Function):
@staticmethod
def forward(ctx, input, num_groups, weight=None, bias=None, eps=1e-05):
device_type = input.device.type
if device_type != 'cuda' or input.ndim > 4:
input = input.contiguous()
if weight is not None:
weight = weight.contiguous()
if bias is not None:
bias = bias.contiguous()
N, C = input.shape[:2]
HxW = input.numel() // (N * C)
output, mean, rstd = aten.native_group_norm(
input, weight, bias, N, C, HxW, num_groups, eps)
else:
needs_backward = any(x is not None and x.requires_grad
for x in [input, weight, bias])
|
aten = torch.ops.aten
def construct_triton_contiguous_torch_op():
class TritonContiguous(torch.autograd.Function):
@staticmethod
def forward(ctx, x, memory_format=torch.contiguous_format):
if x.device.type != 'cuda' or x.ndim > 4 or x.is_contiguous(
memory_format=memory_format):
return aten.contiguous(x, memory_format=memory_format)
else:
dst = torch.empty_like(x, memory_format=memory_format)
return copy(dst, x)
@staticmethod
def backward(ctx, grad_output):
return grad_output, None
def contiguous(x, memory_format=torch.contiguous_format):
return TritonContiguous.apply(x, memory_format)
return contiguous
contiguous = construct_triton_contiguous_torch_op()
register_custom_python_operator(
'sfast_triton::contiguous(Tensor a, MemoryFormat memory_format) -> Tensor',
contiguous)
def constuct_triton_clone_torch_op():
class TritonClone(torch.autograd.Function):
@staticmethod
def forward(ctx, x, memory_format=torch.preserve_format):
if x.device.type != 'cuda' or x.ndim > 4 or x.is_contiguous(
memory_format=memory_format):
return aten.clone(x, memory_format=memory_format)
else:
dst = torch.empty_like(x, memory_format=memory_format)
return copy(dst, x)
@staticmethod
def backward(ctx, grad_output):
return grad_output, None
def clone(x, memory_format=torch.preserve_format):
return TritonClone.apply(x, memory_format)
return clone
clone = constuct_triton_clone_torch_op()
register_custom_python_operator(
'sfast_triton::clone(Tensor a, MemoryFormat memory_format) -> Tensor',
clone)
def construct_triton_reshape_torch_op():
class TritonReshape(torch.autograd.Function):
@staticmethod
def forward(ctx, x, shape):
ctx.shape = x.shape
if x.device.type != 'cuda' or x.ndim > 4 or sfast._C._compute_stride(
x.shape, x.stride(), shape) is not None:
return aten.reshape(x, shape)
else:
dst = torch.empty_like(x,
memory_format=torch.contiguous_format)
copy(dst, x)
return aten.reshape(dst, shape)
@staticmethod
def backward(ctx, grad_output):
if grad_output.device.type != 'cuda' or grad_output.ndim > 4 or sfast._C._compute_stride(
grad_output.shape, grad_output.stride(),
ctx.shape) is not None:
return grad_output.reshape(ctx.shape), None
else:
dst = torch.empty_like(grad_output,
memory_format=torch.contiguous_format)
copy(dst, grad_output)
return dst.reshape(ctx.shape), None
def reshape(x, shape):
return TritonReshape.apply(x, shape)
return reshape
reshape = construct_triton_reshape_torch_op()
register_custom_python_operator(
'sfast_triton::reshape(Tensor a, int[] shape) -> Tensor', reshape)
def construct_triton_group_norm_torch_op():
class TritonGroupNorm(torch.autograd.Function):
@staticmethod
def forward(ctx, input, num_groups, weight=None, bias=None, eps=1e-05):
device_type = input.device.type
if device_type != 'cuda' or input.ndim > 4:
input = input.contiguous()
if weight is not None:
weight = weight.contiguous()
if bias is not None:
bias = bias.contiguous()
N, C = input.shape[:2]
HxW = input.numel() // (N * C)
output, mean, rstd = aten.native_group_norm(
input, weight, bias, N, C, HxW, num_groups, eps)
else:
needs_backward = any(x is not None and x.requires_grad
for x in [input, weight, bias]) | output, mean, rstd = group_norm_forward( | 1 | 2023-10-17 06:49:59+00:00 | 8k |
microsoft/SoM | demo_som.py | [
{
"identifier": "interactive_seem_m2m_auto",
"path": "task_adapter/seem/tasks/interactive_seem_m2m_auto.py",
"snippet": "def interactive_seem_m2m_auto(model, image, text_size, label_mode='1', alpha=0.1, anno_mode=['Mask']):\n t = []\n t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC))\n transform1 = transforms.Compose(t)\n image_ori = transform1(image)\n\n image_ori = np.asarray(image_ori)\n images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda()\n\n mask_generator = SeemAutomaticMaskGenerator(model)\n outputs = mask_generator.generate(images)\n\n from task_adapter.utils.visualizer import Visualizer\n visual = Visualizer(image_ori, metadata=metadata)\n sorted_anns = sorted(outputs, key=(lambda x: x['area']), reverse=True)\n label = 1\n for ann in sorted_anns:\n mask = ann['segmentation']\n color_mask = np.random.random((1, 3)).tolist()[0]\n # color_mask = [int(c*255) for c in color_mask]\n demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode)\n label += 1\n im = demo.get_image()\n\n # fig=plt.figure(figsize=(10, 10))\n # plt.imshow(image_ori)\n # show_anns(outputs)\n # fig.canvas.draw()\n # im=Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb())\n return im"
},
{
"identifier": "inference_seem_pano",
"path": "task_adapter/seem/tasks/inference_seem_pano.py",
"snippet": "def inference_seem_pano(model, image, text_size, label_mode='1', alpha=0.1, anno_mode=['Mask']):\n t = []\n t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC))\n transform1 = transforms.Compose(t)\n image_ori = transform1(image)\n\n image_ori = np.asarray(image_ori)\n images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda()\n\n orig_size = images.shape[-2:]\n orig_h, orig_w = orig_size\n crop_box = [0,0,orig_w,orig_h]\n\n data = {\"image\": images, \"height\": orig_h, \"width\": orig_w}\n batch_inputs = [data]\n\n model.model.metadata = metadata\n outputs = model.model.evaluate(batch_inputs)\n\n pano_mask = outputs[0]['panoptic_seg'][0]\n pano_info = outputs[0]['panoptic_seg'][1]\n\n masks = []\n for seg_info in pano_info:\n masks += [pano_mask == seg_info['id']]\n masks = torch.stack(masks, dim=0)\n iou_preds = torch.ones(masks.shape[0], dtype=torch.float32)\n points = torch.zeros((masks.shape[0], 2), dtype=torch.float32)\n\n mask_data = MaskData(\n masks=masks,\n iou_preds=iou_preds,\n points=points,\n )\n mask_data[\"stability_score\"] = torch.ones(masks.shape[0], dtype=torch.float32)\n del masks\n\n mask_data[\"boxes\"] = batched_mask_to_box(mask_data[\"masks\"])\n mask_data[\"crop_boxes\"] = torch.tensor([crop_box for _ in range(len(mask_data[\"boxes\"]))])\n\n # Compress to RLE\n mask_data[\"masks\"] = uncrop_masks(mask_data[\"masks\"], crop_box, orig_h, orig_w)\n mask_data[\"rles\"] = mask_to_rle_pytorch(mask_data[\"masks\"])\n del mask_data[\"masks\"]\n mask_data[\"segmentations\"] = [rle_to_mask(rle) for rle in mask_data[\"rles\"]]\n\n # Write mask records\n outputs = []\n for idx in range(len(mask_data[\"segmentations\"])):\n ann = {\n \"segmentation\": mask_data[\"segmentations\"][idx],\n \"area\": area_from_rle(mask_data[\"rles\"][idx]),\n \"bbox\": box_xyxy_to_xywh(mask_data[\"boxes\"][idx]).tolist(),\n \"predicted_iou\": mask_data[\"iou_preds\"][idx].item(),\n \"point_coords\": [mask_data[\"points\"][idx].tolist()],\n \"stability_score\": mask_data[\"stability_score\"][idx].item(),\n \"crop_box\": box_xyxy_to_xywh(mask_data[\"crop_boxes\"][idx]).tolist(),\n }\n outputs.append(ann)\n\n from task_adapter.utils.visualizer import Visualizer\n visual = Visualizer(image_ori, metadata=metadata)\n # create a full zero image as the image_orig\n sorted_anns = sorted(outputs, key=(lambda x: x['area']), reverse=True)\n label = 1\n mask_map = np.zeros(image_ori.shape, dtype=np.uint8) \n for i, ann in enumerate(sorted_anns):\n mask = ann['segmentation']\n color_mask = np.random.random((1, 3)).tolist()[0]\n # color_mask = [int(c*255) for c in color_mask]\n demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode)\n # assign the mask to the mask_map\n mask_map[mask == 1] = label\n label += 1\n im = demo.get_image()\n # fig=plt.figure(figsize=(10, 10))\n # plt.imshow(image_ori)\n # show_anns(outputs)\n # fig.canvas.draw()\n # im=Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb())\n return im, sorted_anns"
},
{
"identifier": "inference_seem_interactive",
"path": "task_adapter/seem/tasks/inference_seem_interactive.py",
"snippet": "def inference_seem_interactive(model, image, spatial_masks, text_size, label_mode='1', alpha=0.1, anno_mode=['Mask']):\n t = []\n t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC))\n transform1 = transforms.Compose(t)\n image_ori = transform1(image)\n\n image_ori = np.asarray(image_ori)\n images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda()\n\n orig_size = images.shape[-2:]\n orig_h, orig_w = orig_size\n crop_box = [0,0,orig_w,orig_h]\n\n data = {\"image\": images, \"height\": orig_h, \"width\": orig_w}\n\n spatial_masks = spatial_masks[:, None].float().cuda()\n spatial_masks = F.interpolate(spatial_masks, size=(orig_h, orig_w), mode='bicubic', align_corners=False) > 0\n data['spatial_query'] = {'rand_shape': spatial_masks}\n\n model.model.metadata = metadata\n masks, _ = model.model.evaluate_demo([data])\n masks = masks > 0.0\n iou_preds = torch.ones(masks.shape[0], dtype=torch.float32)\n points = torch.zeros((masks.shape[0], 2), dtype=torch.float32)\n\n mask_data = MaskData(\n masks=masks,\n iou_preds=iou_preds,\n points=points,\n )\n\n mask_data[\"stability_score\"] = torch.ones(masks.shape[0], dtype=torch.float32)\n del masks\n\n mask_data[\"boxes\"] = batched_mask_to_box(mask_data[\"masks\"])\n mask_data[\"crop_boxes\"] = torch.tensor([crop_box for _ in range(len(mask_data[\"boxes\"]))])\n\n # Compress to RLE\n mask_data[\"masks\"] = uncrop_masks(mask_data[\"masks\"], crop_box, orig_h, orig_w)\n mask_data[\"rles\"] = mask_to_rle_pytorch(mask_data[\"masks\"])\n del mask_data[\"masks\"]\n mask_data[\"segmentations\"] = [rle_to_mask(rle) for rle in mask_data[\"rles\"]]\n\n # Write mask records\n outputs = []\n for idx in range(len(mask_data[\"segmentations\"])):\n ann = {\n \"segmentation\": mask_data[\"segmentations\"][idx],\n \"area\": area_from_rle(mask_data[\"rles\"][idx]),\n \"bbox\": box_xyxy_to_xywh(mask_data[\"boxes\"][idx]).tolist(),\n \"predicted_iou\": mask_data[\"iou_preds\"][idx].item(),\n \"point_coords\": [mask_data[\"points\"][idx].tolist()],\n \"stability_score\": mask_data[\"stability_score\"][idx].item(),\n \"crop_box\": box_xyxy_to_xywh(mask_data[\"crop_boxes\"][idx]).tolist(),\n }\n outputs.append(ann)\n\n from task_adapter.utils.visualizer import Visualizer\n visual = Visualizer(image_ori, metadata=metadata)\n sorted_anns = sorted(outputs, key=(lambda x: x['area']), reverse=True)\n label = 1\n # for ann in sorted_anns:\n # mask = ann['segmentation']\n # color_mask = np.random.random((1, 3)).tolist()[0]\n # # color_mask = [int(c*255) for c in color_mask]\n # demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode)\n # label += 1\n # im = demo.get_image()\n\n mask_map = np.zeros(image_ori.shape, dtype=np.uint8) \n for i, ann in enumerate(sorted_anns):\n mask = ann['segmentation']\n color_mask = np.random.random((1, 3)).tolist()[0]\n # color_mask = [int(c*255) for c in color_mask]\n demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode)\n # assign the mask to the mask_map\n mask_map[mask == 1] = label\n label += 1\n im = demo.get_image()\n # fig=plt.figure(figsize=(10, 10))\n # plt.imshow(image_ori)\n # show_anns(outputs)\n # fig.canvas.draw()\n # im=Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb())\n return im, sorted_anns"
},
{
"identifier": "inference_semsam_m2m_auto",
"path": "task_adapter/semantic_sam/tasks/inference_semsam_m2m_auto.py",
"snippet": "def inference_semsam_m2m_auto(model, image, level, all_classes, all_parts, thresh, text_size, hole_scale, island_scale, semantic, refimg=None, reftxt=None, audio_pth=None, video_pth=None, label_mode='1', alpha=0.1, anno_mode=['Mask']):\n t = []\n t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC))\n transform1 = transforms.Compose(t)\n image_ori = transform1(image)\n\n image_ori = np.asarray(image_ori)\n images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda()\n\n mask_generator = SemanticSamAutomaticMaskGenerator(model,points_per_side=32,\n pred_iou_thresh=0.88,\n stability_score_thresh=0.92,\n min_mask_region_area=10,\n level=level,\n )\n outputs = mask_generator.generate(images)\n\n from task_adapter.utils.visualizer import Visualizer\n visual = Visualizer(image_ori, metadata=metadata)\n sorted_anns = sorted(outputs, key=(lambda x: x['area']), reverse=True)\n label = 1\n # for ann in sorted_anns:\n # mask = ann['segmentation']\n # color_mask = np.random.random((1, 3)).tolist()[0]\n # # color_mask = [int(c*255) for c in color_mask]\n # demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode)\n # label += 1\n # im = demo.get_image()\n\n mask_map = np.zeros(image_ori.shape, dtype=np.uint8) \n for i, ann in enumerate(sorted_anns):\n mask = ann['segmentation']\n color_mask = np.random.random((1, 3)).tolist()[0]\n # color_mask = [int(c*255) for c in color_mask]\n demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode)\n # assign the mask to the mask_map\n mask_map[mask == 1] = label\n label += 1\n im = demo.get_image() \n # fig=plt.figure(figsize=(10, 10))\n # plt.imshow(image_ori)\n # show_anns(outputs)\n # fig.canvas.draw()\n # im=Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb())\n return im, sorted_anns"
},
{
"identifier": "prompt_switch",
"path": "task_adapter/semantic_sam/tasks/automatic_mask_generator.py",
"snippet": "def prompt_switch(p):\n p = int(p)\n if p == 1:\n return 3\n if p == 2:\n return 2\n if p == 3:\n return 0\n if p == 4:\n return 4\n if p == 5:\n return 1\n if p == 6:\n return 5\n else:\n raise NotImplementedError"
},
{
"identifier": "inference_sam_m2m_auto",
"path": "task_adapter/sam/tasks/inference_sam_m2m_auto.py",
"snippet": "def inference_sam_m2m_auto(model, image, text_size, label_mode='1', alpha=0.1, anno_mode=['Mask']):\n t = []\n t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC))\n transform1 = transforms.Compose(t)\n image_ori = transform1(image)\n image_ori = np.asarray(image_ori)\n\n mask_generator = SamAutomaticMaskGenerator(model)\n outputs = mask_generator.generate(image_ori)\n\n from task_adapter.utils.visualizer import Visualizer\n visual = Visualizer(image_ori, metadata=metadata)\n sorted_anns = sorted(outputs, key=(lambda x: x['area']), reverse=True)\n label = 1\n # for ann in sorted_anns:\n # mask = ann['segmentation']\n # color_mask = np.random.random((1, 3)).tolist()[0]\n # # color_mask = [int(c*255) for c in color_mask]\n # demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode)\n # label += 1\n # im = demo.get_image()\n\n mask_map = np.zeros(image_ori.shape, dtype=np.uint8) \n for i, ann in enumerate(sorted_anns):\n mask = ann['segmentation']\n color_mask = np.random.random((1, 3)).tolist()[0]\n # color_mask = [int(c*255) for c in color_mask]\n demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode)\n # assign the mask to the mask_map\n mask_map[mask == 1] = label\n label += 1\n im = demo.get_image() \n # fig=plt.figure(figsize=(10, 10))\n # plt.imshow(image_ori)\n # show_anns(outputs)\n # fig.canvas.draw()\n # im=Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb())\n return im, sorted_anns"
},
{
"identifier": "inference_sam_m2m_interactive",
"path": "task_adapter/sam/tasks/inference_sam_m2m_interactive.py",
"snippet": "def inference_sam_m2m_interactive(model, image, spatial_masks, text_size, label_mode='1', alpha=0.1, anno_mode=['Mask']):\n t = []\n t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC))\n transform1 = transforms.Compose(t)\n image_ori = transform1(image)\n\n image_ori = np.asarray(image_ori)\n images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda()\n\n orig_size = images.shape[-2:]\n orig_h, orig_w = orig_size\n crop_box = [0,0,orig_w,orig_h]\n\n spatial_masks = spatial_masks[:, None].float().cuda()\n spatial_masks = F.interpolate(spatial_masks, size=(orig_h, orig_w), mode='bicubic', align_corners=False) > 0\n\n # generate single center point\n # n,_,h,w = spatial_masks.shape\n # mask_dt = (distance_transform((~F.pad(spatial_masks, pad=(1, 1, 1, 1), mode='constant', value=0)).float())[:,:,1:-1,1:-1]).reshape(n,-1)\n # max_xy_idx = torch.stack([torch.arange(n), mask_dt.max(dim=-1)[1].cpu()]).tolist()\n # next_mask = torch.zeros(spatial_masks.shape, device=torch.cuda.current_device()).bool()\n # next_mask = next_mask.view(n,-1)\n # next_mask[max_xy_idx] = True\n # next_mask = next_mask.reshape((n,1,h,w))\n # points = next_mask.nonzero()[:,2:].flip(dims=[1]).cpu().numpy()\n\n # stack sampled points\n acc_points = []\n for i in range(len(spatial_masks)):\n points = spatial_masks[i:i+1].nonzero()[:,2:].flip(dims=[1]).cpu().numpy()\n rand_ids = np.random.choice(points.shape[0], size=40, replace=True)\n points = points[rand_ids]\n acc_points.append(points)\n _np = len(acc_points)\n points = np.concatenate(acc_points)\n\n mask_generator = SamAutomaticMaskGenerator(model)\n mask_generator.predictor.set_image(image_ori)\n im_size = image_ori.shape[:-1]\n\n transformed_points = mask_generator.predictor.transform.apply_coords(points, im_size)\n in_points = torch.as_tensor(transformed_points, device=mask_generator.predictor.device).reshape(_np,-1,2).transpose(0,1)\n in_labels = torch.ones((in_points.shape[0], _np), dtype=torch.int, device=mask_generator.predictor.device)\n\n masks = sam_interactive_mask(mask_generator, points, in_points.transpose(0,1), in_labels.transpose(0,1), None)\n\n masks = masks > 0.0\n iou_preds = torch.ones(masks.shape[0], dtype=torch.float32)\n points = torch.zeros((masks.shape[0], 2), dtype=torch.float32)\n\n mask_data = MaskData(\n masks=masks,\n iou_preds=iou_preds,\n points=points,\n )\n\n mask_data[\"stability_score\"] = torch.ones(masks.shape[0], dtype=torch.float32)\n del masks\n\n mask_data[\"boxes\"] = batched_mask_to_box(mask_data[\"masks\"])\n mask_data[\"crop_boxes\"] = torch.tensor([crop_box for _ in range(len(mask_data[\"boxes\"]))])\n\n # Compress to RLE\n mask_data[\"masks\"] = uncrop_masks(mask_data[\"masks\"], crop_box, orig_h, orig_w)\n mask_data[\"rles\"] = mask_to_rle_pytorch(mask_data[\"masks\"])\n del mask_data[\"masks\"]\n mask_data[\"segmentations\"] = [rle_to_mask(rle) for rle in mask_data[\"rles\"]]\n\n # Write mask records\n outputs = []\n for idx in range(len(mask_data[\"segmentations\"])):\n ann = {\n \"segmentation\": mask_data[\"segmentations\"][idx],\n \"area\": area_from_rle(mask_data[\"rles\"][idx]),\n \"bbox\": box_xyxy_to_xywh(mask_data[\"boxes\"][idx]).tolist(),\n \"predicted_iou\": mask_data[\"iou_preds\"][idx].item(),\n \"point_coords\": [mask_data[\"points\"][idx].tolist()],\n \"stability_score\": mask_data[\"stability_score\"][idx].item(),\n \"crop_box\": box_xyxy_to_xywh(mask_data[\"crop_boxes\"][idx]).tolist(),\n }\n outputs.append(ann)\n\n from task_adapter.utils.visualizer import Visualizer\n visual = Visualizer(image_ori, metadata=metadata)\n sorted_anns = sorted(outputs, key=(lambda x: x['area']), reverse=True)\n label = 1\n # for ann in sorted_anns:\n # mask = ann['segmentation']\n # demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode)\n # label += 1\n # im = demo.get_image()\n\n mask_map = np.zeros(image_ori.shape, dtype=np.uint8) \n for i, ann in enumerate(sorted_anns):\n mask = ann['segmentation']\n color_mask = np.random.random((1, 3)).tolist()[0]\n # color_mask = [int(c*255) for c in color_mask]\n demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode)\n # assign the mask to the mask_map\n mask_map[mask == 1] = label\n label += 1\n im = demo.get_image() \n # fig=plt.figure(figsize=(10, 10))\n # plt.imshow(image_ori)\n # show_anns(outputs)\n # fig.canvas.draw()\n # im=Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb())\n return im, sorted_anns"
}
] | import gradio as gr
import torch
import argparse
import numpy as np
from seem.modeling.BaseModel import BaseModel as BaseModel_Seem
from seem.utils.distributed import init_distributed as init_distributed_seem
from seem.modeling import build_model as build_model_seem
from task_adapter.seem.tasks import interactive_seem_m2m_auto, inference_seem_pano, inference_seem_interactive
from semantic_sam.BaseModel import BaseModel
from semantic_sam import build_model
from semantic_sam.utils.dist import init_distributed_mode
from semantic_sam.utils.arguments import load_opt_from_config_file
from semantic_sam.utils.constants import COCO_PANOPTIC_CLASSES
from task_adapter.semantic_sam.tasks import inference_semsam_m2m_auto, prompt_switch
from segment_anything import sam_model_registry
from task_adapter.sam.tasks.inference_sam_m2m_auto import inference_sam_m2m_auto
from task_adapter.sam.tasks.inference_sam_m2m_interactive import inference_sam_m2m_interactive
from scipy.ndimage import label | 6,179 | # --------------------------------------------------------
# Set-of-Mark (SoM) Prompting for Visual Grounding in GPT-4V
# Copyright (c) 2023 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by:
# Jianwei Yang ([email protected])
# Xueyan Zou ([email protected])
# Hao Zhang ([email protected])
# --------------------------------------------------------
# seem
# semantic sam
# sam
'''
build args
'''
semsam_cfg = "configs/semantic_sam_only_sa-1b_swinL.yaml"
seem_cfg = "configs/seem_focall_unicl_lang_v1.yaml"
semsam_ckpt = "./swinl_only_sam_many2many.pth"
sam_ckpt = "./sam_vit_h_4b8939.pth"
seem_ckpt = "./seem_focall_v1.pt"
opt_semsam = load_opt_from_config_file(semsam_cfg)
opt_seem = load_opt_from_config_file(seem_cfg)
opt_seem = init_distributed_seem(opt_seem)
'''
build model
'''
model_semsam = BaseModel(opt_semsam, build_model(opt_semsam)).from_pretrained(semsam_ckpt).eval().cuda()
model_sam = sam_model_registry["vit_h"](checkpoint=sam_ckpt).eval().cuda()
model_seem = BaseModel_Seem(opt_seem, build_model_seem(opt_seem)).from_pretrained(seem_ckpt).eval().cuda()
with torch.no_grad():
with torch.autocast(device_type='cuda', dtype=torch.float16):
model_seem.model.sem_seg_head.predictor.lang_encoder.get_text_embeddings(COCO_PANOPTIC_CLASSES + ["background"], is_eval=True)
@torch.no_grad()
def inference(image, slider, mode, alpha, label_mode, anno_mode, *args, **kwargs):
if slider < 1.5:
model_name = 'seem'
elif slider > 2.5:
model_name = 'sam'
else:
if mode == 'Automatic':
model_name = 'semantic-sam'
if slider < 1.5 + 0.14:
level = [1]
elif slider < 1.5 + 0.28:
level = [2]
elif slider < 1.5 + 0.42:
level = [3]
elif slider < 1.5 + 0.56:
level = [4]
elif slider < 1.5 + 0.70:
level = [5]
elif slider < 1.5 + 0.84:
level = [6]
else:
level = [6, 1, 2, 3, 4, 5]
else:
model_name = 'sam'
if label_mode == 'Alphabet':
label_mode = 'a'
else:
label_mode = '1'
text_size, hole_scale, island_scale=640,100,100
text, text_part, text_thresh = '','','0.0'
with torch.autocast(device_type='cuda', dtype=torch.float16):
semantic=False
if mode == "Interactive":
labeled_array, num_features = label(np.asarray(image['mask'].convert('L')))
spatial_masks = torch.stack([torch.from_numpy(labeled_array == i+1) for i in range(num_features)])
if model_name == 'semantic-sam':
model = model_semsam
output, mask = inference_semsam_m2m_auto(model, image['image'], level, text, text_part, text_thresh, text_size, hole_scale, island_scale, semantic, label_mode=label_mode, alpha=alpha, anno_mode=anno_mode, *args, **kwargs)
elif model_name == 'sam':
model = model_sam
if mode == "Automatic":
| # --------------------------------------------------------
# Set-of-Mark (SoM) Prompting for Visual Grounding in GPT-4V
# Copyright (c) 2023 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by:
# Jianwei Yang ([email protected])
# Xueyan Zou ([email protected])
# Hao Zhang ([email protected])
# --------------------------------------------------------
# seem
# semantic sam
# sam
'''
build args
'''
semsam_cfg = "configs/semantic_sam_only_sa-1b_swinL.yaml"
seem_cfg = "configs/seem_focall_unicl_lang_v1.yaml"
semsam_ckpt = "./swinl_only_sam_many2many.pth"
sam_ckpt = "./sam_vit_h_4b8939.pth"
seem_ckpt = "./seem_focall_v1.pt"
opt_semsam = load_opt_from_config_file(semsam_cfg)
opt_seem = load_opt_from_config_file(seem_cfg)
opt_seem = init_distributed_seem(opt_seem)
'''
build model
'''
model_semsam = BaseModel(opt_semsam, build_model(opt_semsam)).from_pretrained(semsam_ckpt).eval().cuda()
model_sam = sam_model_registry["vit_h"](checkpoint=sam_ckpt).eval().cuda()
model_seem = BaseModel_Seem(opt_seem, build_model_seem(opt_seem)).from_pretrained(seem_ckpt).eval().cuda()
with torch.no_grad():
with torch.autocast(device_type='cuda', dtype=torch.float16):
model_seem.model.sem_seg_head.predictor.lang_encoder.get_text_embeddings(COCO_PANOPTIC_CLASSES + ["background"], is_eval=True)
@torch.no_grad()
def inference(image, slider, mode, alpha, label_mode, anno_mode, *args, **kwargs):
if slider < 1.5:
model_name = 'seem'
elif slider > 2.5:
model_name = 'sam'
else:
if mode == 'Automatic':
model_name = 'semantic-sam'
if slider < 1.5 + 0.14:
level = [1]
elif slider < 1.5 + 0.28:
level = [2]
elif slider < 1.5 + 0.42:
level = [3]
elif slider < 1.5 + 0.56:
level = [4]
elif slider < 1.5 + 0.70:
level = [5]
elif slider < 1.5 + 0.84:
level = [6]
else:
level = [6, 1, 2, 3, 4, 5]
else:
model_name = 'sam'
if label_mode == 'Alphabet':
label_mode = 'a'
else:
label_mode = '1'
text_size, hole_scale, island_scale=640,100,100
text, text_part, text_thresh = '','','0.0'
with torch.autocast(device_type='cuda', dtype=torch.float16):
semantic=False
if mode == "Interactive":
labeled_array, num_features = label(np.asarray(image['mask'].convert('L')))
spatial_masks = torch.stack([torch.from_numpy(labeled_array == i+1) for i in range(num_features)])
if model_name == 'semantic-sam':
model = model_semsam
output, mask = inference_semsam_m2m_auto(model, image['image'], level, text, text_part, text_thresh, text_size, hole_scale, island_scale, semantic, label_mode=label_mode, alpha=alpha, anno_mode=anno_mode, *args, **kwargs)
elif model_name == 'sam':
model = model_sam
if mode == "Automatic": | output, mask = inference_sam_m2m_auto(model, image['image'], text_size, label_mode, alpha, anno_mode) | 5 | 2023-10-16 03:39:26+00:00 | 8k |
codefuse-ai/Test-Agent | chat/server/gradio_testgpt.py | [
{
"identifier": "LOGDIR",
"path": "chat/constants.py",
"snippet": "LOGDIR = os.getenv(\"LOGDIR\", \".\")"
},
{
"identifier": "WORKER_API_TIMEOUT",
"path": "chat/constants.py",
"snippet": "WORKER_API_TIMEOUT = int(os.getenv(\"FASTCHAT_WORKER_API_TIMEOUT\", 100))"
},
{
"identifier": "ErrorCode",
"path": "chat/constants.py",
"snippet": "class ErrorCode(IntEnum):\n \"\"\"\n https://platform.openai.com/docs/guides/error-codes/api-errors\n \"\"\"\n\n VALIDATION_TYPE_ERROR = 40001\n\n INVALID_AUTH_KEY = 40101\n INCORRECT_AUTH_KEY = 40102\n NO_PERMISSION = 40103\n\n INVALID_MODEL = 40301\n PARAM_OUT_OF_RANGE = 40302\n CONTEXT_OVERFLOW = 40303\n\n RATE_LIMIT = 42901\n QUOTA_EXCEEDED = 42902\n ENGINE_OVERLOADED = 42903\n\n INTERNAL_ERROR = 50001\n CUDA_OUT_OF_MEMORY = 50002\n GRADIO_REQUEST_ERROR = 50003\n GRADIO_STREAM_UNKNOWN_ERROR = 50004\n CONTROLLER_NO_WORKER = 50005\n CONTROLLER_WORKER_TIMEOUT = 50006"
},
{
"identifier": "MODERATION_MSG",
"path": "chat/constants.py",
"snippet": "MODERATION_MSG = \"YOUR INPUT VIOLATES OUR CONTENT MODERATION GUIDELINES. PLEASE FIX YOUR INPUT AND TRY AGAIN.\""
},
{
"identifier": "CONVERSATION_LIMIT_MSG",
"path": "chat/constants.py",
"snippet": "CONVERSATION_LIMIT_MSG = \"YOU HAVE REACHED THE CONVERSATION LENGTH LIMIT. PLEASE CLEAR HISTORY AND START A NEW CONVERSATION.\""
},
{
"identifier": "SERVER_ERROR_MSG",
"path": "chat/constants.py",
"snippet": "SERVER_ERROR_MSG = (\n \"**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**\"\n)"
},
{
"identifier": "INACTIVE_MSG",
"path": "chat/constants.py",
"snippet": "INACTIVE_MSG = \"THIS SESSION HAS BEEN INACTIVE FOR TOO LONG. PLEASE REFRESH THIS PAGE.\""
},
{
"identifier": "INPUT_CHAR_LEN_LIMIT",
"path": "chat/constants.py",
"snippet": "INPUT_CHAR_LEN_LIMIT = int(os.getenv(\"FASTCHAT_INPUT_CHAR_LEN_LIMIT\", 2560))"
},
{
"identifier": "CONVERSATION_TURN_LIMIT",
"path": "chat/constants.py",
"snippet": "CONVERSATION_TURN_LIMIT = 50"
},
{
"identifier": "SESSION_EXPIRATION_TIME",
"path": "chat/constants.py",
"snippet": "SESSION_EXPIRATION_TIME = 3600"
},
{
"identifier": "get_conversation_template",
"path": "chat/model/model_adapter.py",
"snippet": "def get_conversation_template(model_path: str) -> Conversation:\n \"\"\"Get the default conversation template.\"\"\"\n adapter = get_model_adapter(model_path)\n return adapter.get_default_conv_template(model_path)"
},
{
"identifier": "model_info",
"path": "chat/model/model_registry.py",
"snippet": "def register_model_info(\n full_names: List[str], simple_name: str, link: str, description: str\n):\ndef get_model_info(name: str) -> ModelInfo:"
},
{
"identifier": "anthropic_api_stream_iter",
"path": "chat/server/api_provider.py",
"snippet": "def anthropic_api_stream_iter(model_name, prompt, temperature, top_p, max_new_tokens):\n import anthropic\n\n c = anthropic.Anthropic(api_key=os.environ[\"ANTHROPIC_API_KEY\"])\n\n # Make requests\n gen_params = {\n \"model\": model_name,\n \"prompt\": prompt,\n \"temperature\": temperature,\n \"top_p\": top_p,\n \"max_new_tokens\": max_new_tokens,\n }\n logger.info(f\"==== request ====\\n{gen_params}\")\n\n res = c.completions.create(\n prompt=prompt,\n stop_sequences=[anthropic.HUMAN_PROMPT],\n max_tokens_to_sample=max_new_tokens,\n temperature=temperature,\n top_p=top_p,\n model=model_name,\n stream=True,\n )\n text = \"\"\n for chunk in res:\n text += chunk.completion\n data = {\n \"text\": text,\n \"error_code\": 0,\n }\n yield data"
},
{
"identifier": "openai_api_stream_iter",
"path": "chat/server/api_provider.py",
"snippet": "def openai_api_stream_iter(\n model_name,\n messages,\n temperature,\n top_p,\n max_new_tokens,\n api_base=None,\n api_key=None,\n):\n import openai\n\n openai.api_base = api_base or \"https://api.openai.com/v1\"\n openai.api_key = api_key or os.environ[\"OPENAI_API_KEY\"]\n\n # Make requests\n gen_params = {\n \"model\": model_name,\n \"prompt\": messages,\n \"temperature\": temperature,\n \"top_p\": top_p,\n \"max_new_tokens\": max_new_tokens,\n }\n logger.info(f\"==== request ====\\n{gen_params}\")\n\n res = openai.ChatCompletion.create(\n model=model_name,\n messages=messages,\n temperature=temperature,\n max_tokens=max_new_tokens,\n stream=True,\n )\n text = \"\"\n for chunk in res:\n text += chunk[\"choices\"][0][\"delta\"].get(\"content\", \"\")\n data = {\n \"text\": text,\n \"error_code\": 0,\n }\n yield data"
},
{
"identifier": "palm_api_stream_iter",
"path": "chat/server/api_provider.py",
"snippet": "def palm_api_stream_iter(chat, message, temperature, top_p, max_new_tokens):\n parameters = {\n \"temperature\": temperature,\n \"top_p\": top_p,\n \"max_output_tokens\": max_new_tokens,\n }\n gen_params = {\n \"model\": \"palm-2\",\n \"prompt\": message,\n }\n gen_params.update(parameters)\n logger.info(f\"==== request ====\\n{gen_params}\")\n\n response = chat.send_message(message, **parameters)\n content = response.text\n\n pos = 0\n while pos < len(content):\n # This is a fancy way to simulate token generation latency combined\n # with a Poisson process.\n pos += random.randint(10, 20)\n time.sleep(random.expovariate(50))\n data = {\n \"text\": content[:pos],\n \"error_code\": 0,\n }\n yield data"
},
{
"identifier": "init_palm_chat",
"path": "chat/server/api_provider.py",
"snippet": "def init_palm_chat(model_name):\n import vertexai # pip3 install google-cloud-aiplatform\n from vertexai.preview.language_models import ChatModel\n\n project_id = os.environ[\"GCP_PROJECT_ID\"]\n location = \"us-central1\"\n vertexai.init(project=project_id, location=location)\n\n chat_model = ChatModel.from_pretrained(model_name)\n chat = chat_model.start_chat(examples=[])\n return chat"
},
{
"identifier": "build_logger",
"path": "chat/utils.py",
"snippet": "def build_logger(logger_name, logger_filename):\n def __init__(self, logger, log_level=logging.INFO):\n def __getattr__(self, attr):\n def write(self, buf):\n def flush(self):\ndef disable_torch_init():\ndef get_gpu_memory(max_gpus=None):\ndef violates_moderation(text):\ndef clean_flant5_ckpt(ckpt_path):\ndef pretty_print_semaphore(semaphore):\ndef iter_over_async(\n async_gen: AsyncGenerator, event_loop: AbstractEventLoop\n) -> Generator:\n async def get_next():\ndef detect_language(text: str) -> str:\ndef parse_gradio_auth_creds(filename: str):\ndef is_partial_stop(output: str, stop_str: str):\ndef run_cmd(cmd: str):\ndef is_sentence_complete(output: str):\ndef get_context_length(config):\nclass StreamToLogger(object):\nSEQUENCE_LENGTH_KEYS = [\n \"max_sequence_length\",\n \"seq_length\",\n \"max_position_embeddings\",\n \"max_seq_len\",\n \"model_max_length\",\n]"
}
] | import argparse
import datetime
import json
import os
import time
import uuid
import gradio as gr
import requests
from collections import defaultdict
from chat.constants import (
LOGDIR,
WORKER_API_TIMEOUT,
ErrorCode,
MODERATION_MSG,
CONVERSATION_LIMIT_MSG,
SERVER_ERROR_MSG,
INACTIVE_MSG,
INPUT_CHAR_LEN_LIMIT,
CONVERSATION_TURN_LIMIT,
SESSION_EXPIRATION_TIME,
)
from chat.model.model_adapter import get_conversation_template
from chat.model.model_registry import model_info
from chat.server.api_provider import (
anthropic_api_stream_iter,
openai_api_stream_iter,
palm_api_stream_iter,
init_palm_chat,
)
from chat.utils import (
build_logger,
violates_moderation,
get_window_url_params_js,
parse_gradio_auth_creds,
) | 3,906 | interactive=True,
show_label=False,
container=False,
)
chatbot = gr.Chatbot(
elem_id="chatbot",
label="Scroll down and start chatting",
visible=False,
height=550,
)
with gr.Row(visible=True) as button_fun_row:
gen_testcase_btn = gr.Button(value="单测生成")
assert_completion_btn = gr.Button(value="Assert补全")
with gr.Row():
with gr.Column(scale=20):
textbox = gr.Textbox(
show_label=False,
placeholder="Enter text and press ENTER",
visible=False,
container=False,
)
with gr.Column(scale=1, min_width=100):
send_btn = gr.Button(value="Send", visible=False)
with gr.Row(visible=True) as button_row:
regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=True)
clear_btn = gr.Button(value="🗑️ Clear history", interactive=True)
with gr.Accordion("Parameters", open=False, visible=False) as parameter_row:
temperature = gr.Slider(
minimum=0.0,
maximum=1.0,
value=0.7,
step=0.1,
interactive=True,
label="Temperature",
)
top_p = gr.Slider(
minimum=0.0,
maximum=1.0,
value=1.0,
step=0.1,
interactive=True,
label="Top P",
)
max_output_tokens = gr.Slider(
minimum=16,
maximum=1024,
value=512,
step=64,
interactive=True,
label="Max output tokens",
)
# Register listeners
#btn_list = [regenerate_btn, clear_btn]
btn_list = []
regenerate_btn.click(regenerate, state, [state, chatbot, textbox] + btn_list).then(
bot_response,
[state, temperature, top_p, max_output_tokens],
[state, chatbot] + btn_list,
)
clear_btn.click(clear_history, None, [state, chatbot, textbox] + btn_list)
gen_testcase_btn.click(fn=gen_testcase, outputs=textbox)
assert_completion_btn.click(fn=assert_completion, outputs=textbox)
model_selector.change(clear_history, None, [state, chatbot, textbox] + btn_list)
textbox.submit(
add_text, [state, model_selector, textbox], [state, chatbot, textbox] + btn_list
).then(
bot_response,
[state, temperature, top_p, max_output_tokens],
[state, chatbot] + btn_list,
)
send_btn.click(
add_text, [state, model_selector, textbox], [state, chatbot, textbox] + btn_list
).then(
bot_response,
[state, temperature, top_p, max_output_tokens],
[state, chatbot] + btn_list,
)
return state, model_selector, chatbot, textbox, send_btn, button_row, parameter_row
def build_demo(models):
with gr.Blocks(
title="TestAgent",
theme=gr.themes.Base(),
css=block_css,
) as demo:
url_params = gr.JSON(visible=False)
(
state,
model_selector,
chatbot,
textbox,
send_btn,
button_row,
parameter_row,
) = build_single_model_ui(models, add_promotion_links=True)
if args.model_list_mode not in ["once", "reload"]:
raise ValueError(f"Unknown model list mode: {args.model_list_mode}")
demo.load(
load_demo,
[url_params],
[
state,
model_selector,
chatbot,
textbox,
send_btn,
button_row,
parameter_row,
],
| """
The gradio demo server for chatting with a single model.
"""
logger = build_logger("gradio_web_server", "gradio_web_server.log")
headers = {"User-Agent": "FastChat Client"}
no_change_btn = gr.Button.update()
enable_btn = gr.Button.update(interactive=True)
disable_btn = gr.Button.update(interactive=False)
controller_url = None
enable_moderation = False
acknowledgment_md = """
**Acknowledgment:** We thank Kaggle, MBZUAI, and AnyScale for their sponsorship.
"""
ip_expiration_dict = defaultdict(lambda: 0)
# Information about custom OpenAI compatible API models.
# JSON file format:
# {
# "vicuna-7b": {
# "model_name": "vicuna-7b-v1.5",
# "api_base": "http://8.8.8.55:5555/v1",
# "api_key": "password"
# },
# }
openai_compatible_models_info = {}
class State:
def __init__(self, model_name):
self.conv = get_conversation_template(model_name)
self.conv_id = uuid.uuid4().hex
self.skip_next = False
self.model_name = model_name
if model_name == "palm-2":
# According to release note, "chat-bison@001" is PaLM 2 for chat.
# https://cloud.google.com/vertex-ai/docs/release-notes#May_10_2023
self.palm_chat = init_palm_chat("chat-bison@001")
def to_gradio_chatbot(self):
return self.conv.to_gradio_chatbot()
def dict(self):
base = self.conv.dict()
base.update(
{
"conv_id": self.conv_id,
"model_name": self.model_name,
}
)
return base
def set_global_vars(controller_url_, enable_moderation_):
global controller_url, enable_moderation
controller_url = controller_url_
enable_moderation = enable_moderation_
def get_conv_log_filename():
t = datetime.datetime.now()
name = os.path.join(LOGDIR, f"{t.year}-{t.month:02d}-{t.day:02d}-conv.json")
return name
def get_model_list(
controller_url, register_openai_compatible_models, add_chatgpt, add_claude, add_palm
):
if controller_url:
ret = requests.post(controller_url + "/refresh_all_workers")
assert ret.status_code == 200
ret = requests.post(controller_url + "/list_models")
models = ret.json()["models"]
else:
models = []
# Add API providers
if register_openai_compatible_models:
global openai_compatible_models_info
openai_compatible_models_info = json.load(
open(register_openai_compatible_models)
)
models += list(openai_compatible_models_info.keys())
if add_chatgpt:
models += ["gpt-3.5-turbo", "gpt-4"]
if add_claude:
models += ["claude-2", "claude-instant-1"]
if add_palm:
models += ["palm-2"]
models = list(set(models))
priority = {k: f"___{i:02d}" for i, k in enumerate(model_info)}
models.sort(key=lambda x: priority.get(x, x))
logger.info(f"Models: {models}")
return models
def load_demo_single(models, url_params):
selected_model = models[0] if len(models) > 0 else ""
if "model" in url_params:
model = url_params["model"]
if model in models:
selected_model = model
dropdown_update = gr.Dropdown.update(
choices=models, value=selected_model, visible=True
)
state = None
return (
state,
dropdown_update,
gr.Chatbot.update(visible=True),
gr.Textbox.update(visible=True),
gr.Button.update(visible=True),
gr.Row.update(visible=True),
gr.Accordion.update(visible=True),
)
def load_demo(url_params, request: gr.Request):
global models
ip = request.client.host
logger.info(f"load_demo. ip: {ip}. params: {url_params}")
ip_expiration_dict[ip] = time.time() + SESSION_EXPIRATION_TIME
if args.model_list_mode == "reload":
models = get_model_list(
controller_url,
args.register_openai_compatible_models,
args.add_chatgpt,
args.add_claude,
args.add_palm,
)
return load_demo_single(models, url_params)
def vote_last_response(state, vote_type, model_selector, request: gr.Request):
with open(get_conv_log_filename(), "a") as fout:
data = {
"tstamp": round(time.time(), 4),
"type": vote_type,
"model": model_selector,
"state": state.dict(),
"ip": request.client.host,
}
fout.write(json.dumps(data) + "\n")
def upvote_last_response(state, model_selector, request: gr.Request):
logger.info(f"upvote. ip: {request.client.host}")
vote_last_response(state, "upvote", model_selector, request)
return ("",) + (disable_btn,) * 3
def downvote_last_response(state, model_selector, request: gr.Request):
logger.info(f"downvote. ip: {request.client.host}")
vote_last_response(state, "downvote", model_selector, request)
return ("",) + (disable_btn,) * 3
def flag_last_response(state, model_selector, request: gr.Request):
logger.info(f"flag. ip: {request.client.host}")
vote_last_response(state, "flag", model_selector, request)
return ("",) + (disable_btn,) * 3
def regenerate(state, request: gr.Request):
logger.info(f"regenerate. ip: {request.client.host}")
state.conv.update_last_message(None)
return (state, state.to_gradio_chatbot(), "") + (disable_btn,) * 2
def clear_history(request: gr.Request):
logger.info(f"clear_history. ip: {request.client.host}")
state = None
return (state, [], "") + (disable_btn,) * 2
def gen_testcase():
return "为以下代码写单测:\n" + \
"```\n" + \
"def prime_and_fibonacci_less_than(n):\n" + \
" # Generating prime numbers\n" + \
" primes = []\n" + \
" for x in range(2, n):\n" + \
" for y in range(2, x):\n" + \
" if x % y == 0:\n" + \
" break\n" + \
" else:\n" + \
" primes.append(x)\n" + \
" \n" + \
" # Generating Fibonacci numbers\n" + \
" fibonacci = []\n" + \
" a, b = 0, 1\n" + \
" while a < n:\n" + \
" fibonacci.append(a)\n" + \
" a, b = b, a+b\n" + \
"\n" + \
" return {'Primes': primes, 'Fibonacci': fibonacci}\n" + \
"\n" + \
"# Testing the function\n" + \
"print(prime_and_fibonacci_less_than(20))\n" + \
"```"
def assert_completion():
return "下面是被测代码\n" + \
"```java\n" + \
"public class LongCollectorImpl implements LongCollector<A, R> {\n" + \
" private final Set<Collector.Characteristics> characteristics;\n" + \
"\n" + \
" @Override\n" + \
" public Set<Collector.Characteristics> characteristics() {\n" + \
" return characteristics;\n" + \
" }\n" + \
"}\n" + \
"```\n" + \
"下面代码是针对上面被测代码生成的用例,请补全用例,生成assert校验\n" + \
"```java\n" + \
"private static final Set<Collector.Characteristics> CHARACTERISTICS = emptySet();\n" + \
"private static final LongCollectorImpl<List<Long>, Long> COLLECTOR = new LongCollectorImpl<>(\n" + \
" SUPPLIER, ACCUMULATOR, COMBINER, FINISHER, CHARACTERISTICS);\n" + \
"\n" + \
"@Test\n" + \
"void characteristics() {\n" + \
" COLLECTOR.characteristics();\n" + \
"}\n" + \
"\n" + \
"```"
def add_text(state, model_selector, text, request: gr.Request):
ip = request.client.host
logger.info(f"add_text. ip: {ip}. len: {len(text)}")
if state is None:
state = State(model_selector)
if len(text) <= 0:
state.skip_next = True
return (state, state.to_gradio_chatbot(), "") + (no_change_btn,) * 5
if ip_expiration_dict[ip] < time.time():
logger.info(f"inactive. ip: {request.client.host}. text: {text}")
state.skip_next = True
return (state, state.to_gradio_chatbot(), INACTIVE_MSG) + (no_change_btn,) * 5
if enable_moderation:
flagged = violates_moderation(text)
if flagged:
logger.info(f"violate moderation. ip: {request.client.host}. text: {text}")
state.skip_next = True
return (state, state.to_gradio_chatbot(), MODERATION_MSG) + (
no_change_btn,
) * 5
conv = state.conv
if (len(conv.messages) - conv.offset) // 2 >= CONVERSATION_TURN_LIMIT:
logger.info(f"conversation turn limit. ip: {request.client.host}. text: {text}")
state.skip_next = True
return (state, state.to_gradio_chatbot(), CONVERSATION_LIMIT_MSG) + (
no_change_btn,
) * 5
text = text[:INPUT_CHAR_LEN_LIMIT] # Hard cut-off
conv.append_message(conv.roles[0], text)
conv.append_message(conv.roles[1], None)
return (state, state.to_gradio_chatbot(), "") + (disable_btn,) * 5
def post_process_code(code):
sep = "\n```"
if sep in code:
blocks = code.split(sep)
if len(blocks) % 2 == 1:
for i in range(1, len(blocks), 2):
blocks[i] = blocks[i].replace("\\_", "_")
code = sep.join(blocks)
return code
def model_worker_stream_iter(
conv,
model_name,
worker_addr,
prompt,
temperature,
repetition_penalty,
top_p,
max_new_tokens,
):
# Make requests
gen_params = {
"model": model_name,
"prompt": prompt,
"temperature": temperature,
"repetition_penalty": repetition_penalty,
"top_p": top_p,
"max_new_tokens": max_new_tokens,
"stop": conv.stop_str,
"stop_token_ids": conv.stop_token_ids,
"echo": False,
}
logger.info(f"==== request ====\n{gen_params}")
# Stream output
response = requests.post(
worker_addr + "/worker_generate_stream",
headers=headers,
json=gen_params,
stream=True,
timeout=WORKER_API_TIMEOUT,
)
for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"):
if chunk:
data = json.loads(chunk.decode())
yield data
def bot_response(state, temperature, top_p, max_new_tokens, request: gr.Request):
logger.info(f"bot_response. ip: {request.client.host}")
start_tstamp = time.time()
temperature = float(temperature)
top_p = float(top_p)
max_new_tokens = int(max_new_tokens)
if state.skip_next:
# This generate call is skipped due to invalid inputs
state.skip_next = False
yield (state, state.to_gradio_chatbot()) + (no_change_btn,) * 5
return
conv, model_name = state.conv, state.model_name
if model_name == "gpt-3.5-turbo" or model_name == "gpt-4":
prompt = conv.to_openai_api_messages()
stream_iter = openai_api_stream_iter(
model_name, prompt, temperature, top_p, max_new_tokens
)
elif model_name == "claude-2" or model_name == "claude-instant-1":
prompt = conv.get_prompt()
stream_iter = anthropic_api_stream_iter(
model_name, prompt, temperature, top_p, max_new_tokens
)
elif model_name == "palm-2":
stream_iter = palm_api_stream_iter(
state.palm_chat, conv.messages[-2][1], temperature, top_p, max_new_tokens
)
elif model_name in openai_compatible_models_info:
model_info = openai_compatible_models_info[model_name]
prompt = conv.to_openai_api_messages()
stream_iter = openai_api_stream_iter(
model_info["model_name"],
prompt,
temperature,
top_p,
max_new_tokens,
api_base=model_info["api_base"],
api_key=model_info["api_key"],
)
else:
# Query worker address
ret = requests.post(
controller_url + "/get_worker_address", json={"model": model_name}
)
worker_addr = ret.json()["address"]
logger.info(f"model_name: {model_name}, worker_addr: {worker_addr}")
# No available worker
if worker_addr == "":
conv.update_last_message(SERVER_ERROR_MSG)
yield (
state,
state.to_gradio_chatbot(),
disable_btn,
disable_btn,
disable_btn,
enable_btn,
enable_btn,
)
return
# Construct prompt.
# We need to call it here, so it will not be affected by "▌".
prompt = conv.get_prompt()
# Set repetition_penalty
if "t5" in model_name:
repetition_penalty = 1.2
else:
repetition_penalty = 1.0
stream_iter = model_worker_stream_iter(
conv,
model_name,
worker_addr,
prompt,
temperature,
repetition_penalty,
top_p,
max_new_tokens,
)
conv.update_last_message("▌")
yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
try:
for i, data in enumerate(stream_iter):
if data["error_code"] == 0:
if i % 5 != 0: # reduce gradio's overhead
continue
output = data["text"].strip()
conv.update_last_message(output + "▌")
yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
else:
output = data["text"] + f"\n\n(error_code: {data['error_code']})"
conv.update_last_message(output)
yield (state, state.to_gradio_chatbot()) + (
disable_btn,
disable_btn,
disable_btn,
enable_btn,
enable_btn,
)
return
output = data["text"].strip()
if "vicuna" in model_name:
output = post_process_code(output)
conv.update_last_message(output)
yield (state, state.to_gradio_chatbot()) + (enable_btn,) * 5
except requests.exceptions.RequestException as e:
conv.update_last_message(
f"{SERVER_ERROR_MSG}\n\n"
f"(error_code: {ErrorCode.GRADIO_REQUEST_ERROR}, {e})"
)
yield (state, state.to_gradio_chatbot()) + (
disable_btn,
disable_btn,
disable_btn,
enable_btn,
enable_btn,
)
return
except Exception as e:
conv.update_last_message(
f"{SERVER_ERROR_MSG}\n\n"
f"(error_code: {ErrorCode.GRADIO_STREAM_UNKNOWN_ERROR}, {e})"
)
yield (state, state.to_gradio_chatbot()) + (
disable_btn,
disable_btn,
disable_btn,
enable_btn,
enable_btn,
)
return
finish_tstamp = time.time()
logger.info(f"{output}")
with open(get_conv_log_filename(), "a") as fout:
data = {
"tstamp": round(finish_tstamp, 4),
"type": "chat",
"model": model_name,
"gen_params": {
"temperature": temperature,
"top_p": top_p,
"max_new_tokens": max_new_tokens,
},
"start": round(start_tstamp, 4),
"finish": round(finish_tstamp, 4),
"state": state.dict(),
"ip": request.client.host,
}
fout.write(json.dumps(data) + "\n")
block_css = """
#notice_markdown {
font-size: 104%
}
#notice_markdown th {
display: none;
}
#notice_markdown td {
padding-top: 6px;
padding-bottom: 6px;
}
#leaderboard_markdown {
font-size: 104%
}
#leaderboard_markdown td {
padding-top: 6px;
padding-bottom: 6px;
}
#leaderboard_dataframe td {
line-height: 0.1em;
}
footer {
display:none !important
}
"""
def get_model_description_md(models):
model_description_md = """
| | | |
| ---- | ---- | ---- |
"""
ct = 0
visited = set()
for i, name in enumerate(models):
if name in model_info:
minfo = model_info[name]
if minfo.simple_name in visited:
continue
visited.add(minfo.simple_name)
one_model_md = f"[{minfo.simple_name}]({minfo.link}): {minfo.description}"
else:
visited.add(name)
one_model_md = (
f"[{name}](): Add the description at chat/model/model_registry.py"
)
if ct % 3 == 0:
model_description_md += "|"
model_description_md += f" {one_model_md} |"
if ct % 3 == 2:
model_description_md += "\n"
ct += 1
return model_description_md
# TODO
def build_single_model_ui(models, add_promotion_links=False):
promotion = (
"""
- TestGPT-7B: 模型以CodeLlama-7B为基座,进行了测试领域下游任务的微调,包含多语言测试用例生成、测试用例Assert补全。 [[ModelScope]](https://modelscope.cn/models/codefuse-ai/TestGPT-7B/summary)
"""
if add_promotion_links
else ""
)
notice_markdown = f"""
# 🏔️ TestAgent 测试助理
{promotion}
### 请选择模型
"""
state = gr.State()
model_description_md = get_model_description_md(models)
gr.Markdown(notice_markdown + model_description_md, elem_id="notice_markdown")
fun = ["testcase", "assert"]
with gr.Row(elem_id="model_selector_row"):
model_selector = gr.Dropdown(
choices=models,
value=models[0] if len(models) > 0 else "",
interactive=True,
show_label=False,
container=False,
)
chatbot = gr.Chatbot(
elem_id="chatbot",
label="Scroll down and start chatting",
visible=False,
height=550,
)
with gr.Row(visible=True) as button_fun_row:
gen_testcase_btn = gr.Button(value="单测生成")
assert_completion_btn = gr.Button(value="Assert补全")
with gr.Row():
with gr.Column(scale=20):
textbox = gr.Textbox(
show_label=False,
placeholder="Enter text and press ENTER",
visible=False,
container=False,
)
with gr.Column(scale=1, min_width=100):
send_btn = gr.Button(value="Send", visible=False)
with gr.Row(visible=True) as button_row:
regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=True)
clear_btn = gr.Button(value="🗑️ Clear history", interactive=True)
with gr.Accordion("Parameters", open=False, visible=False) as parameter_row:
temperature = gr.Slider(
minimum=0.0,
maximum=1.0,
value=0.7,
step=0.1,
interactive=True,
label="Temperature",
)
top_p = gr.Slider(
minimum=0.0,
maximum=1.0,
value=1.0,
step=0.1,
interactive=True,
label="Top P",
)
max_output_tokens = gr.Slider(
minimum=16,
maximum=1024,
value=512,
step=64,
interactive=True,
label="Max output tokens",
)
# Register listeners
#btn_list = [regenerate_btn, clear_btn]
btn_list = []
regenerate_btn.click(regenerate, state, [state, chatbot, textbox] + btn_list).then(
bot_response,
[state, temperature, top_p, max_output_tokens],
[state, chatbot] + btn_list,
)
clear_btn.click(clear_history, None, [state, chatbot, textbox] + btn_list)
gen_testcase_btn.click(fn=gen_testcase, outputs=textbox)
assert_completion_btn.click(fn=assert_completion, outputs=textbox)
model_selector.change(clear_history, None, [state, chatbot, textbox] + btn_list)
textbox.submit(
add_text, [state, model_selector, textbox], [state, chatbot, textbox] + btn_list
).then(
bot_response,
[state, temperature, top_p, max_output_tokens],
[state, chatbot] + btn_list,
)
send_btn.click(
add_text, [state, model_selector, textbox], [state, chatbot, textbox] + btn_list
).then(
bot_response,
[state, temperature, top_p, max_output_tokens],
[state, chatbot] + btn_list,
)
return state, model_selector, chatbot, textbox, send_btn, button_row, parameter_row
def build_demo(models):
with gr.Blocks(
title="TestAgent",
theme=gr.themes.Base(),
css=block_css,
) as demo:
url_params = gr.JSON(visible=False)
(
state,
model_selector,
chatbot,
textbox,
send_btn,
button_row,
parameter_row,
) = build_single_model_ui(models, add_promotion_links=True)
if args.model_list_mode not in ["once", "reload"]:
raise ValueError(f"Unknown model list mode: {args.model_list_mode}")
demo.load(
load_demo,
[url_params],
[
state,
model_selector,
chatbot,
textbox,
send_btn,
button_row,
parameter_row,
], | _js=get_window_url_params_js, | 16 | 2023-10-20 08:56:20+00:00 | 8k |
thuml/iTransformer | data_provider/data_factory.py | [
{
"identifier": "Dataset_ETT_hour",
"path": "data_provider/data_loader.py",
"snippet": "class Dataset_ETT_hour(Dataset):\n def __init__(self, root_path, flag='train', size=None,\n features='S', data_path='ETTh1.csv',\n target='OT', scale=True, timeenc=0, freq='h'):\n # size [seq_len, label_len, pred_len]\n # info\n if size == None:\n self.seq_len = 24 * 4 * 4\n self.label_len = 24 * 4\n self.pred_len = 24 * 4\n else:\n self.seq_len = size[0]\n self.label_len = size[1]\n self.pred_len = size[2]\n # init\n assert flag in ['train', 'test', 'val']\n type_map = {'train': 0, 'val': 1, 'test': 2}\n self.set_type = type_map[flag]\n\n self.features = features\n self.target = target\n self.scale = scale\n self.timeenc = timeenc\n self.freq = freq\n\n self.root_path = root_path\n self.data_path = data_path\n self.__read_data__()\n\n def __read_data__(self):\n self.scaler = StandardScaler()\n df_raw = pd.read_csv(os.path.join(self.root_path,\n self.data_path))\n\n border1s = [0, 12 * 30 * 24 - self.seq_len, 12 * 30 * 24 + 4 * 30 * 24 - self.seq_len]\n border2s = [12 * 30 * 24, 12 * 30 * 24 + 4 * 30 * 24, 12 * 30 * 24 + 8 * 30 * 24]\n border1 = border1s[self.set_type]\n border2 = border2s[self.set_type]\n\n if self.features == 'M' or self.features == 'MS':\n cols_data = df_raw.columns[1:]\n df_data = df_raw[cols_data]\n elif self.features == 'S':\n df_data = df_raw[[self.target]]\n\n if self.scale:\n train_data = df_data[border1s[0]:border2s[0]]\n self.scaler.fit(train_data.values)\n data = self.scaler.transform(df_data.values)\n else:\n data = df_data.values\n\n df_stamp = df_raw[['date']][border1:border2]\n df_stamp['date'] = pd.to_datetime(df_stamp.date)\n if self.timeenc == 0:\n df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)\n df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)\n df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)\n df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)\n data_stamp = df_stamp.drop(['date'], 1).values\n elif self.timeenc == 1:\n data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)\n data_stamp = data_stamp.transpose(1, 0)\n\n self.data_x = data[border1:border2]\n self.data_y = data[border1:border2]\n self.data_stamp = data_stamp\n\n def __getitem__(self, index):\n s_begin = index\n s_end = s_begin + self.seq_len\n r_begin = s_end - self.label_len\n r_end = r_begin + self.label_len + self.pred_len\n\n seq_x = self.data_x[s_begin:s_end]\n seq_y = self.data_y[r_begin:r_end]\n seq_x_mark = self.data_stamp[s_begin:s_end]\n seq_y_mark = self.data_stamp[r_begin:r_end]\n\n return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n def __len__(self):\n return len(self.data_x) - self.seq_len - self.pred_len + 1\n\n def inverse_transform(self, data):\n return self.scaler.inverse_transform(data)"
},
{
"identifier": "Dataset_ETT_minute",
"path": "data_provider/data_loader.py",
"snippet": "class Dataset_ETT_minute(Dataset):\n def __init__(self, root_path, flag='train', size=None,\n features='S', data_path='ETTm1.csv',\n target='OT', scale=True, timeenc=0, freq='t'):\n # size [seq_len, label_len, pred_len]\n # info\n if size == None:\n self.seq_len = 24 * 4 * 4\n self.label_len = 24 * 4\n self.pred_len = 24 * 4\n else:\n self.seq_len = size[0]\n self.label_len = size[1]\n self.pred_len = size[2]\n # init\n assert flag in ['train', 'test', 'val']\n type_map = {'train': 0, 'val': 1, 'test': 2}\n self.set_type = type_map[flag]\n\n self.features = features\n self.target = target\n self.scale = scale\n self.timeenc = timeenc\n self.freq = freq\n\n self.root_path = root_path\n self.data_path = data_path\n self.__read_data__()\n\n def __read_data__(self):\n self.scaler = StandardScaler()\n df_raw = pd.read_csv(os.path.join(self.root_path,\n self.data_path))\n\n border1s = [0, 12 * 30 * 24 * 4 - self.seq_len, 12 * 30 * 24 * 4 + 4 * 30 * 24 * 4 - self.seq_len]\n border2s = [12 * 30 * 24 * 4, 12 * 30 * 24 * 4 + 4 * 30 * 24 * 4, 12 * 30 * 24 * 4 + 8 * 30 * 24 * 4]\n border1 = border1s[self.set_type]\n border2 = border2s[self.set_type]\n\n if self.features == 'M' or self.features == 'MS':\n cols_data = df_raw.columns[1:]\n df_data = df_raw[cols_data]\n elif self.features == 'S':\n df_data = df_raw[[self.target]]\n\n if self.scale:\n train_data = df_data[border1s[0]:border2s[0]]\n self.scaler.fit(train_data.values)\n data = self.scaler.transform(df_data.values)\n else:\n data = df_data.values\n\n df_stamp = df_raw[['date']][border1:border2]\n df_stamp['date'] = pd.to_datetime(df_stamp.date)\n if self.timeenc == 0:\n df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)\n df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)\n df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)\n df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)\n df_stamp['minute'] = df_stamp.date.apply(lambda row: row.minute, 1)\n df_stamp['minute'] = df_stamp.minute.map(lambda x: x // 15)\n data_stamp = df_stamp.drop(['date'], 1).values\n elif self.timeenc == 1:\n data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)\n data_stamp = data_stamp.transpose(1, 0)\n\n self.data_x = data[border1:border2]\n self.data_y = data[border1:border2]\n self.data_stamp = data_stamp\n\n def __getitem__(self, index):\n s_begin = index\n s_end = s_begin + self.seq_len\n r_begin = s_end - self.label_len\n r_end = r_begin + self.label_len + self.pred_len\n\n seq_x = self.data_x[s_begin:s_end]\n seq_y = self.data_y[r_begin:r_end]\n seq_x_mark = self.data_stamp[s_begin:s_end]\n seq_y_mark = self.data_stamp[r_begin:r_end]\n\n return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n def __len__(self):\n return len(self.data_x) - self.seq_len - self.pred_len + 1\n\n def inverse_transform(self, data):\n return self.scaler.inverse_transform(data)"
},
{
"identifier": "Dataset_Custom",
"path": "data_provider/data_loader.py",
"snippet": "class Dataset_Custom(Dataset):\n def __init__(self, root_path, flag='train', size=None,\n features='S', data_path='ETTh1.csv',\n target='OT', scale=True, timeenc=0, freq='h'):\n # size [seq_len, label_len, pred_len]\n # info\n if size == None:\n self.seq_len = 24 * 4 * 4\n self.label_len = 24 * 4\n self.pred_len = 24 * 4\n else:\n self.seq_len = size[0]\n self.label_len = size[1]\n self.pred_len = size[2]\n # init\n assert flag in ['train', 'test', 'val']\n type_map = {'train': 0, 'val': 1, 'test': 2}\n self.set_type = type_map[flag]\n\n self.features = features\n self.target = target\n self.scale = scale\n self.timeenc = timeenc\n self.freq = freq\n\n self.root_path = root_path\n self.data_path = data_path\n self.__read_data__()\n\n def __read_data__(self):\n self.scaler = StandardScaler()\n df_raw = pd.read_csv(os.path.join(self.root_path,\n self.data_path))\n\n '''\n df_raw.columns: ['date', ...(other features), target feature]\n '''\n cols = list(df_raw.columns)\n cols.remove(self.target)\n cols.remove('date')\n df_raw = df_raw[['date'] + cols + [self.target]]\n num_train = int(len(df_raw) * 0.7)\n num_test = int(len(df_raw) * 0.2)\n num_vali = len(df_raw) - num_train - num_test\n border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len]\n border2s = [num_train, num_train + num_vali, len(df_raw)]\n border1 = border1s[self.set_type]\n border2 = border2s[self.set_type]\n\n if self.features == 'M' or self.features == 'MS':\n cols_data = df_raw.columns[1:]\n df_data = df_raw[cols_data]\n elif self.features == 'S':\n df_data = df_raw[[self.target]]\n\n if self.scale:\n train_data = df_data[border1s[0]:border2s[0]]\n self.scaler.fit(train_data.values)\n data = self.scaler.transform(df_data.values)\n else:\n data = df_data.values\n\n df_stamp = df_raw[['date']][border1:border2]\n df_stamp['date'] = pd.to_datetime(df_stamp.date)\n if self.timeenc == 0:\n df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)\n df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)\n df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)\n df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)\n data_stamp = df_stamp.drop(['date'], 1).values\n elif self.timeenc == 1:\n data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)\n data_stamp = data_stamp.transpose(1, 0)\n\n self.data_x = data[border1:border2]\n self.data_y = data[border1:border2]\n self.data_stamp = data_stamp\n\n def __getitem__(self, index):\n s_begin = index\n s_end = s_begin + self.seq_len\n r_begin = s_end - self.label_len\n r_end = r_begin + self.label_len + self.pred_len\n\n seq_x = self.data_x[s_begin:s_end]\n seq_y = self.data_y[r_begin:r_end]\n seq_x_mark = self.data_stamp[s_begin:s_end]\n seq_y_mark = self.data_stamp[r_begin:r_end]\n\n return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n def __len__(self):\n return len(self.data_x) - self.seq_len - self.pred_len + 1\n\n def inverse_transform(self, data):\n return self.scaler.inverse_transform(data)"
},
{
"identifier": "Dataset_Solar",
"path": "data_provider/data_loader.py",
"snippet": "class Dataset_Solar(Dataset):\n def __init__(self, root_path, flag='train', size=None,\n features='S', data_path='ETTh1.csv',\n target='OT', scale=True, timeenc=0, freq='h'):\n # size [seq_len, label_len, pred_len]\n # info\n self.seq_len = size[0]\n self.label_len = size[1]\n self.pred_len = size[2]\n # init\n assert flag in ['train', 'test', 'val']\n type_map = {'train': 0, 'val': 1, 'test': 2}\n self.set_type = type_map[flag]\n\n self.features = features\n self.target = target\n self.scale = scale\n self.timeenc = timeenc\n self.freq = freq\n\n self.root_path = root_path\n self.data_path = data_path\n self.__read_data__()\n\n def __read_data__(self):\n self.scaler = StandardScaler()\n df_raw = []\n with open(os.path.join(self.root_path, self.data_path), \"r\", encoding='utf-8') as f:\n for line in f.readlines():\n line = line.strip('\\n').split(',')\n data_line = np.stack([float(i) for i in line])\n df_raw.append(data_line)\n df_raw = np.stack(df_raw, 0)\n df_raw = pd.DataFrame(df_raw)\n\n num_train = int(len(df_raw) * 0.7)\n num_test = int(len(df_raw) * 0.2)\n num_valid = int(len(df_raw) * 0.1)\n border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len]\n border2s = [num_train, num_train + num_valid, len(df_raw)]\n border1 = border1s[self.set_type]\n border2 = border2s[self.set_type]\n\n df_data = df_raw.values\n\n if self.scale:\n train_data = df_data[border1s[0]:border2s[0]]\n self.scaler.fit(train_data)\n data = self.scaler.transform(df_data)\n else:\n data = df_data\n\n self.data_x = data[border1:border2]\n self.data_y = data[border1:border2]\n\n def __getitem__(self, index):\n s_begin = index\n s_end = s_begin + self.seq_len\n r_begin = s_end - self.label_len\n r_end = r_begin + self.label_len + self.pred_len\n\n seq_x = self.data_x[s_begin:s_end]\n seq_y = self.data_y[r_begin:r_end]\n seq_x_mark = torch.zeros((seq_x.shape[0], 1))\n seq_y_mark = torch.zeros((seq_x.shape[0], 1))\n\n return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n def __len__(self):\n return len(self.data_x) - self.seq_len - self.pred_len + 1\n\n def inverse_transform(self, data):\n return self.scaler.inverse_transform(data)"
},
{
"identifier": "Dataset_PEMS",
"path": "data_provider/data_loader.py",
"snippet": "class Dataset_PEMS(Dataset):\n def __init__(self, root_path, flag='train', size=None,\n features='S', data_path='ETTh1.csv',\n target='OT', scale=True, timeenc=0, freq='h'):\n # size [seq_len, label_len, pred_len]\n # info\n self.seq_len = size[0]\n self.label_len = size[1]\n self.pred_len = size[2]\n # init\n assert flag in ['train', 'test', 'val']\n type_map = {'train': 0, 'val': 1, 'test': 2}\n self.set_type = type_map[flag]\n\n self.features = features\n self.target = target\n self.scale = scale\n self.timeenc = timeenc\n self.freq = freq\n\n self.root_path = root_path\n self.data_path = data_path\n self.__read_data__()\n\n def __read_data__(self):\n self.scaler = StandardScaler()\n data_file = os.path.join(self.root_path, self.data_path)\n data = np.load(data_file, allow_pickle=True)\n data = data['data'][:, :, 0]\n\n train_ratio = 0.6\n valid_ratio = 0.2\n train_data = data[:int(train_ratio * len(data))]\n valid_data = data[int(train_ratio * len(data)): int((train_ratio + valid_ratio) * len(data))]\n test_data = data[int((train_ratio + valid_ratio) * len(data)):]\n total_data = [train_data, valid_data, test_data]\n data = total_data[self.set_type]\n\n if self.scale:\n self.scaler.fit(train_data)\n data = self.scaler.transform(data)\n\n df = pd.DataFrame(data)\n df = df.fillna(method='ffill', limit=len(df)).fillna(method='bfill', limit=len(df)).values\n\n self.data_x = df\n self.data_y = df\n\n def __getitem__(self, index):\n s_begin = index\n s_end = s_begin + self.seq_len\n r_begin = s_end - self.label_len\n r_end = r_begin + self.label_len + self.pred_len\n\n seq_x = self.data_x[s_begin:s_end]\n seq_y = self.data_y[r_begin:r_end]\n seq_x_mark = torch.zeros((seq_x.shape[0], 1))\n seq_y_mark = torch.zeros((seq_x.shape[0], 1))\n\n return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n def __len__(self):\n return len(self.data_x) - self.seq_len - self.pred_len + 1\n\n def inverse_transform(self, data):\n return self.scaler.inverse_transform(data)"
},
{
"identifier": "Dataset_Pred",
"path": "data_provider/data_loader.py",
"snippet": "class Dataset_Pred(Dataset):\n def __init__(self, root_path, flag='pred', size=None,\n features='S', data_path='ETTh1.csv',\n target='OT', scale=True, inverse=False, timeenc=0, freq='15min', cols=None):\n # size [seq_len, label_len, pred_len]\n # info\n if size == None:\n self.seq_len = 24 * 4 * 4\n self.label_len = 24 * 4\n self.pred_len = 24 * 4\n else:\n self.seq_len = size[0]\n self.label_len = size[1]\n self.pred_len = size[2]\n # init\n assert flag in ['pred']\n\n self.features = features\n self.target = target\n self.scale = scale\n self.inverse = inverse\n self.timeenc = timeenc\n self.freq = freq\n self.cols = cols\n self.root_path = root_path\n self.data_path = data_path\n self.__read_data__()\n\n def __read_data__(self):\n self.scaler = StandardScaler()\n df_raw = pd.read_csv(os.path.join(self.root_path,\n self.data_path))\n '''\n df_raw.columns: ['date', ...(other features), target feature]\n '''\n if self.cols:\n cols = self.cols.copy()\n cols.remove(self.target)\n else:\n cols = list(df_raw.columns)\n cols.remove(self.target)\n cols.remove('date')\n df_raw = df_raw[['date'] + cols + [self.target]]\n border1 = len(df_raw) - self.seq_len\n border2 = len(df_raw)\n\n if self.features == 'M' or self.features == 'MS':\n cols_data = df_raw.columns[1:]\n df_data = df_raw[cols_data]\n elif self.features == 'S':\n df_data = df_raw[[self.target]]\n\n if self.scale:\n self.scaler.fit(df_data.values)\n data = self.scaler.transform(df_data.values)\n else:\n data = df_data.values\n\n tmp_stamp = df_raw[['date']][border1:border2]\n tmp_stamp['date'] = pd.to_datetime(tmp_stamp.date)\n pred_dates = pd.date_range(tmp_stamp.date.values[-1], periods=self.pred_len + 1, freq=self.freq)\n\n df_stamp = pd.DataFrame(columns=['date'])\n df_stamp.date = list(tmp_stamp.date.values) + list(pred_dates[1:])\n if self.timeenc == 0:\n df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)\n df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)\n df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)\n df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)\n df_stamp['minute'] = df_stamp.date.apply(lambda row: row.minute, 1)\n df_stamp['minute'] = df_stamp.minute.map(lambda x: x // 15)\n data_stamp = df_stamp.drop(['date'], 1).values\n elif self.timeenc == 1:\n data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)\n data_stamp = data_stamp.transpose(1, 0)\n\n self.data_x = data[border1:border2]\n if self.inverse:\n self.data_y = df_data.values[border1:border2]\n else:\n self.data_y = data[border1:border2]\n self.data_stamp = data_stamp\n\n def __getitem__(self, index):\n s_begin = index\n s_end = s_begin + self.seq_len\n r_begin = s_end - self.label_len\n r_end = r_begin + self.label_len + self.pred_len\n\n seq_x = self.data_x[s_begin:s_end]\n if self.inverse:\n seq_y = self.data_x[r_begin:r_begin + self.label_len]\n else:\n seq_y = self.data_y[r_begin:r_begin + self.label_len]\n seq_x_mark = self.data_stamp[s_begin:s_end]\n seq_y_mark = self.data_stamp[r_begin:r_end]\n\n return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n def __len__(self):\n return len(self.data_x) - self.seq_len + 1\n\n def inverse_transform(self, data):\n return self.scaler.inverse_transform(data)"
}
] | from data_provider.data_loader import Dataset_ETT_hour, Dataset_ETT_minute, Dataset_Custom, Dataset_Solar, Dataset_PEMS, \
Dataset_Pred
from torch.utils.data import DataLoader | 5,696 |
data_dict = {
'ETTh1': Dataset_ETT_hour,
'ETTh2': Dataset_ETT_hour,
'ETTm1': Dataset_ETT_minute,
'ETTm2': Dataset_ETT_minute,
|
data_dict = {
'ETTh1': Dataset_ETT_hour,
'ETTh2': Dataset_ETT_hour,
'ETTm1': Dataset_ETT_minute,
'ETTm2': Dataset_ETT_minute, | 'Solar': Dataset_Solar, | 3 | 2023-10-19 03:23:15+00:00 | 8k |
kylesargent/ZeroNVS | threestudio/models/geometry/implicit_sdf.py | [
{
"identifier": "BaseImplicitGeometry",
"path": "threestudio/models/geometry/base.py",
"snippet": "class BaseImplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n isosurface: bool = True\n isosurface_method: str = \"mt\"\n isosurface_resolution: int = 128\n isosurface_threshold: Union[float, str] = 0.0\n isosurface_chunk: int = 0\n isosurface_coarse_to_fine: bool = True\n isosurface_deformable_grid: bool = False\n isosurface_remove_outliers: bool = True\n isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )\n self.isosurface_helper: Optional[IsosurfaceHelper] = None\n self.unbounded: bool = True\n\n def _initilize_isosurface_helper(self):\n if self.cfg.isosurface and self.isosurface_helper is None:\n if self.cfg.isosurface_method == \"mc-cpu\":\n self.isosurface_helper = MarchingCubeCPUHelper(\n self.cfg.isosurface_resolution\n ).to(self.device)\n elif self.cfg.isosurface_method == \"mt\":\n self.isosurface_helper = MarchingTetrahedraHelper(\n self.cfg.isosurface_resolution,\n f\"load/tets/{self.cfg.isosurface_resolution}_tets.npz\",\n ).to(self.device)\n else:\n raise AttributeError(\n \"Unknown isosurface method {self.cfg.isosurface_method}\"\n )\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n raise NotImplementedError\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n # return the value of the implicit field, could be density / signed distance\n # also return a deformation field if the grid vertices can be optimized\n raise NotImplementedError\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n # return the value of the implicit field, where the zero level set represents the surface\n raise NotImplementedError\n\n def _isosurface(self, bbox: Float[Tensor, \"2 3\"], fine_stage: bool = False) -> Mesh:\n def batch_func(x):\n # scale to bbox as the input vertices are in [0, 1]\n field, deformation = self.forward_field(\n scale_tensor(\n x.to(bbox.device), self.isosurface_helper.points_range, bbox\n ),\n )\n field = field.to(\n x.device\n ) # move to the same device as the input (could be CPU)\n if deformation is not None:\n deformation = deformation.to(x.device)\n return field, deformation\n\n assert self.isosurface_helper is not None\n\n field, deformation = chunk_batch(\n batch_func,\n self.cfg.isosurface_chunk,\n self.isosurface_helper.grid_vertices,\n )\n\n threshold: float\n\n if isinstance(self.cfg.isosurface_threshold, float):\n threshold = self.cfg.isosurface_threshold\n elif self.cfg.isosurface_threshold == \"auto\":\n eps = 1.0e-5\n threshold = field[field > eps].mean().item()\n threestudio.info(\n f\"Automatically determined isosurface threshold: {threshold}\"\n )\n else:\n raise TypeError(\n f\"Unknown isosurface_threshold {self.cfg.isosurface_threshold}\"\n )\n\n level = self.forward_level(field, threshold)\n mesh: Mesh = self.isosurface_helper(level, deformation=deformation)\n mesh.v_pos = scale_tensor(\n mesh.v_pos, self.isosurface_helper.points_range, bbox\n ) # scale to bbox as the grid vertices are in [0, 1]\n mesh.add_extra(\"bbox\", bbox)\n\n if self.cfg.isosurface_remove_outliers:\n # remove outliers components with small number of faces\n # only enabled when the mesh is not differentiable\n mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold)\n\n return mesh\n\n def isosurface(self) -> Mesh:\n if not self.cfg.isosurface:\n raise NotImplementedError(\n \"Isosurface is not enabled in the current configuration\"\n )\n self._initilize_isosurface_helper()\n if self.cfg.isosurface_coarse_to_fine:\n threestudio.debug(\"First run isosurface to get a tight bounding box ...\")\n with torch.no_grad():\n mesh_coarse = self._isosurface(self.bbox)\n vmin, vmax = mesh_coarse.v_pos.amin(dim=0), mesh_coarse.v_pos.amax(dim=0)\n vmin_ = (vmin - (vmax - vmin) * 0.1).max(self.bbox[0])\n vmax_ = (vmax + (vmax - vmin) * 0.1).min(self.bbox[1])\n threestudio.debug(\"Run isosurface again with the tight bounding box ...\")\n mesh = self._isosurface(torch.stack([vmin_, vmax_], dim=0), fine_stage=True)\n else:\n mesh = self._isosurface(self.bbox)\n return mesh"
},
{
"identifier": "contract_to_unisphere",
"path": "threestudio/models/geometry/base.py",
"snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n # import pdb\n # pdb.set_trace()\n\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x = x.clone()\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x"
},
{
"identifier": "Mesh",
"path": "threestudio/models/mesh.py",
"snippet": "class Mesh:\n def __init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss"
},
{
"identifier": "get_encoding",
"path": "threestudio/models/networks.py",
"snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding"
},
{
"identifier": "get_mlp",
"path": "threestudio/models/networks.py",
"snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network"
},
{
"identifier": "broadcast",
"path": "threestudio/utils/misc.py",
"snippet": "def broadcast(tensor, src=0):\n if not _distributed_available():\n return tensor\n else:\n torch.distributed.broadcast(tensor, src=src)\n return tensor"
},
{
"identifier": "get_rank",
"path": "threestudio/utils/misc.py",
"snippet": "def get_rank():\n # SLURM_PROCID can be set even if SLURM is not managing the multiprocessing,\n # therefore LOCAL_RANK needs to be checked first\n rank_keys = (\"RANK\", \"LOCAL_RANK\", \"SLURM_PROCID\", \"JSM_NAMESPACE_RANK\")\n for key in rank_keys:\n rank = os.environ.get(key)\n if rank is not None:\n return int(rank)\n return 0"
}
] | import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
import trimesh
from dataclasses import dataclass, field
from threestudio.models.geometry.base import BaseImplicitGeometry, contract_to_unisphere
from threestudio.models.mesh import Mesh
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.misc import broadcast, get_rank
from threestudio.utils.typing import *
from pysdf import SDF
from tqdm import tqdm | 7,184 | if self.cfg.sdf_bias != 0.0:
threestudio.warn(
"shape_init and sdf_bias are both specified, which may lead to unexpected results."
)
get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]]
assert isinstance(self.cfg.shape_init, str)
if self.cfg.shape_init == "ellipsoid":
assert (
isinstance(self.cfg.shape_init_params, Sized)
and len(self.cfg.shape_init_params) == 3
)
size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)
def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
return ((points_rand / size) ** 2).sum(
dim=-1, keepdim=True
).sqrt() - 1.0 # pseudo signed distance of an ellipsoid
get_gt_sdf = func
elif self.cfg.shape_init == "sphere":
assert isinstance(self.cfg.shape_init_params, float)
radius = self.cfg.shape_init_params
def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius
get_gt_sdf = func
elif self.cfg.shape_init.startswith("mesh:"):
assert isinstance(self.cfg.shape_init_params, float)
mesh_path = self.cfg.shape_init[5:]
if not os.path.exists(mesh_path):
raise ValueError(f"Mesh file {mesh_path} does not exist.")
scene = trimesh.load(mesh_path)
if isinstance(scene, trimesh.Trimesh):
mesh = scene
elif isinstance(scene, trimesh.scene.Scene):
mesh = trimesh.Trimesh()
for obj in scene.geometry.values():
mesh = trimesh.util.concatenate([mesh, obj])
else:
raise ValueError(f"Unknown mesh type at {mesh_path}.")
# move to center
centroid = mesh.vertices.mean(0)
mesh.vertices = mesh.vertices - centroid
# align to up-z and front-x
dirs = ["+x", "+y", "+z", "-x", "-y", "-z"]
dir2vec = {
"+x": np.array([1, 0, 0]),
"+y": np.array([0, 1, 0]),
"+z": np.array([0, 0, 1]),
"-x": np.array([-1, 0, 0]),
"-y": np.array([0, -1, 0]),
"-z": np.array([0, 0, -1]),
}
if (
self.cfg.shape_init_mesh_up not in dirs
or self.cfg.shape_init_mesh_front not in dirs
):
raise ValueError(
f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}."
)
if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:
raise ValueError(
"shape_init_mesh_up and shape_init_mesh_front must be orthogonal."
)
z_, x_ = (
dir2vec[self.cfg.shape_init_mesh_up],
dir2vec[self.cfg.shape_init_mesh_front],
)
y_ = np.cross(z_, x_)
std2mesh = np.stack([x_, y_, z_], axis=0).T
mesh2std = np.linalg.inv(std2mesh)
# scaling
scale = np.abs(mesh.vertices).max()
mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params
mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T
sdf = SDF(mesh.vertices, mesh.faces)
def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
# add a negative signed here
# as in pysdf the inside of the shape has positive signed distance
return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(
points_rand
)[..., None]
get_gt_sdf = func
else:
raise ValueError(
f"Unknown shape initialization type: {self.cfg.shape_init}"
)
# Initialize SDF to a given shape when no weights are provided or force_shape_init is True
optim = torch.optim.Adam(self.parameters(), lr=1e-3)
for _ in tqdm(
range(1000),
desc=f"Initializing SDF to a(n) {self.cfg.shape_init}:",
disable=get_rank() != 0,
):
points_rand = (
torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0
)
sdf_gt = get_gt_sdf(points_rand)
sdf_pred = self.forward_sdf(points_rand)
loss = F.mse_loss(sdf_pred, sdf_gt)
optim.zero_grad()
loss.backward()
optim.step()
# explicit broadcast to ensure param consistency across ranks
for param in self.parameters():
|
@threestudio.register("implicit-sdf")
class ImplicitSDF(BaseImplicitGeometry):
@dataclass
class Config(BaseImplicitGeometry.Config):
n_input_dims: int = 3
n_feature_dims: int = 3
pos_encoding_config: dict = field(
default_factory=lambda: {
"otype": "HashGrid",
"n_levels": 16,
"n_features_per_level": 2,
"log2_hashmap_size": 19,
"base_resolution": 16,
"per_level_scale": 1.447269237440378,
}
)
mlp_network_config: dict = field(
default_factory=lambda: {
"otype": "VanillaMLP",
"activation": "ReLU",
"output_activation": "none",
"n_neurons": 64,
"n_hidden_layers": 1,
}
)
normal_type: Optional[
str
] = "finite_difference" # in ['pred', 'finite_difference', 'finite_difference_laplacian']
finite_difference_normal_eps: Union[
float, str
] = 0.01 # in [float, "progressive"]
shape_init: Optional[str] = None
shape_init_params: Optional[Any] = None
shape_init_mesh_up: str = "+z"
shape_init_mesh_front: str = "+x"
force_shape_init: bool = False
sdf_bias: Union[float, str] = 0.0
sdf_bias_params: Optional[Any] = None
# no need to removal outlier for SDF
isosurface_remove_outliers: bool = False
cfg: Config
def configure(self) -> None:
super().configure()
self.encoding = get_encoding(
self.cfg.n_input_dims, self.cfg.pos_encoding_config
)
self.sdf_network = get_mlp(
self.encoding.n_output_dims, 1, self.cfg.mlp_network_config
)
if self.cfg.n_feature_dims > 0:
self.feature_network = get_mlp(
self.encoding.n_output_dims,
self.cfg.n_feature_dims,
self.cfg.mlp_network_config,
)
if self.cfg.normal_type == "pred":
self.normal_network = get_mlp(
self.encoding.n_output_dims, 3, self.cfg.mlp_network_config
)
if self.cfg.isosurface_deformable_grid:
assert (
self.cfg.isosurface_method == "mt"
), "isosurface_deformable_grid only works with mt"
self.deformation_network = get_mlp(
self.encoding.n_output_dims, 3, self.cfg.mlp_network_config
)
self.finite_difference_normal_eps: Optional[float] = None
def initialize_shape(self) -> None:
if self.cfg.shape_init is None and not self.cfg.force_shape_init:
return
# do not initialize shape if weights are provided
if self.cfg.weights is not None and not self.cfg.force_shape_init:
return
if self.cfg.sdf_bias != 0.0:
threestudio.warn(
"shape_init and sdf_bias are both specified, which may lead to unexpected results."
)
get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]]
assert isinstance(self.cfg.shape_init, str)
if self.cfg.shape_init == "ellipsoid":
assert (
isinstance(self.cfg.shape_init_params, Sized)
and len(self.cfg.shape_init_params) == 3
)
size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)
def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
return ((points_rand / size) ** 2).sum(
dim=-1, keepdim=True
).sqrt() - 1.0 # pseudo signed distance of an ellipsoid
get_gt_sdf = func
elif self.cfg.shape_init == "sphere":
assert isinstance(self.cfg.shape_init_params, float)
radius = self.cfg.shape_init_params
def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius
get_gt_sdf = func
elif self.cfg.shape_init.startswith("mesh:"):
assert isinstance(self.cfg.shape_init_params, float)
mesh_path = self.cfg.shape_init[5:]
if not os.path.exists(mesh_path):
raise ValueError(f"Mesh file {mesh_path} does not exist.")
scene = trimesh.load(mesh_path)
if isinstance(scene, trimesh.Trimesh):
mesh = scene
elif isinstance(scene, trimesh.scene.Scene):
mesh = trimesh.Trimesh()
for obj in scene.geometry.values():
mesh = trimesh.util.concatenate([mesh, obj])
else:
raise ValueError(f"Unknown mesh type at {mesh_path}.")
# move to center
centroid = mesh.vertices.mean(0)
mesh.vertices = mesh.vertices - centroid
# align to up-z and front-x
dirs = ["+x", "+y", "+z", "-x", "-y", "-z"]
dir2vec = {
"+x": np.array([1, 0, 0]),
"+y": np.array([0, 1, 0]),
"+z": np.array([0, 0, 1]),
"-x": np.array([-1, 0, 0]),
"-y": np.array([0, -1, 0]),
"-z": np.array([0, 0, -1]),
}
if (
self.cfg.shape_init_mesh_up not in dirs
or self.cfg.shape_init_mesh_front not in dirs
):
raise ValueError(
f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}."
)
if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:
raise ValueError(
"shape_init_mesh_up and shape_init_mesh_front must be orthogonal."
)
z_, x_ = (
dir2vec[self.cfg.shape_init_mesh_up],
dir2vec[self.cfg.shape_init_mesh_front],
)
y_ = np.cross(z_, x_)
std2mesh = np.stack([x_, y_, z_], axis=0).T
mesh2std = np.linalg.inv(std2mesh)
# scaling
scale = np.abs(mesh.vertices).max()
mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params
mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T
sdf = SDF(mesh.vertices, mesh.faces)
def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
# add a negative signed here
# as in pysdf the inside of the shape has positive signed distance
return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(
points_rand
)[..., None]
get_gt_sdf = func
else:
raise ValueError(
f"Unknown shape initialization type: {self.cfg.shape_init}"
)
# Initialize SDF to a given shape when no weights are provided or force_shape_init is True
optim = torch.optim.Adam(self.parameters(), lr=1e-3)
for _ in tqdm(
range(1000),
desc=f"Initializing SDF to a(n) {self.cfg.shape_init}:",
disable=get_rank() != 0,
):
points_rand = (
torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0
)
sdf_gt = get_gt_sdf(points_rand)
sdf_pred = self.forward_sdf(points_rand)
loss = F.mse_loss(sdf_pred, sdf_gt)
optim.zero_grad()
loss.backward()
optim.step()
# explicit broadcast to ensure param consistency across ranks
for param in self.parameters(): | broadcast(param, src=0) | 5 | 2023-10-24 19:02:44+00:00 | 8k |
princeton-nlp/LLM-Shearing | llmshearing/train.py | [
{
"identifier": "DebugCallback",
"path": "llmshearing/callbacks/callbacks.py",
"snippet": "class DebugCallback(Callback):\n def batch_start(self, state: State, logger: Logger) -> None:\n for b in state.batch[\"input_ids\"]:\n print(b) "
},
{
"identifier": "DynamicLoadingCallback",
"path": "llmshearing/callbacks/dynamic_loading_callback.py",
"snippet": "class DynamicLoadingCallback(Callback):\n \"\"\" \n Callback for dynamic loading of data from different domains. The key components include 1) calculate the new proportion after each evaluation step; 2) update proportion in the dataset objective; 3) save the used domain ids after each epoch for resuming training from a previous checkpoint to make sure that used samples are not used again.\n \"\"\"\n def __init__(self, \n target_loss: List[float] = None, \n proportion: List[float] = None,\n set_names: List[str] = None,\n update_type: str =\"doremi\", \n ) -> None:\n self.set_names = set_names\n self.n_domains = len(set_names)\n self.update_type = update_type \n self.target_loss = target_loss\n self.proportion = proportion\n self.count = -1\n self.used_domain_ids = [[] for _ in range(self.n_domains)]\n print(\"Target loss:\", self.target_loss)\n \n def update_proportion(self, current_prop, losses):\n \"\"\" Update the proportion of each domain \"\"\"\n diff = torch.tensor(losses) - torch.tensor(self.target_loss)\n eta = 1.\n c = 1e-4 # following Doremi (Xie et al., 2023)\n \n if self.update_type == \"doremi\": # update with exponential descent\n updated_alpha = torch.log(torch.tensor(current_prop)) + eta * diff \n updated_alpha = torch.nn.functional.softmax(updated_alpha, dim=0)\n updated_domain_weights = (1-c) * updated_alpha + c / self.n_domains\n elif self.update_type == \"bandit\": \n updated_alpha = torch.tensor(current_prop) + eta * diff \n updated_alpha = torch.nn.functional.softmax(updated_alpha, dim=0)\n updated_domain_weights = (1-c) * updated_alpha + c / self.n_domains\n elif self.update_type == \"constant\": # constant proportion\n updated_domain_weights = torch.tensor(current_prop)\n \n updated_domain_weights = updated_domain_weights.numpy().astype('float64')\n updated_domain_weights = updated_domain_weights / updated_domain_weights.sum()\n return updated_domain_weights.tolist()\n \n def after_train_batch(self, state: State, logger: Logger) -> None:\n \"\"\" Print out the number of used samples in each domain after each training batch, and log the updated proportion of each domain \"\"\"\n idx = state.batch[\"idx\"]\n sets = state.batch[\"set\"]\n all_idx = torch.cat(dist.all_gather(idx))\n all_sets = torch.cat(dist.all_gather(sets))\n dist.barrier() \n \n for i in range(self.n_domains):\n mask = all_sets == i\n domain_idx = all_idx[mask]\n self.used_domain_ids[i].extend(domain_idx.cpu().tolist())\n # for debugging\n # print(f\"domain {i} used {mask.sum().item()} new samples\")\n\n prop = state.train_dataloader.dataset.proportion\n for domain in self.set_names:\n logger.log_metrics({f'metrics/train/{domain}_weight': round(prop[self.set_names.index(domain)], 4)})\n \n def eval_end(self, state: State, logger: Logger) -> None:\n \"\"\" Update the proportion of each domain after each evaluation and update the dataset \"\"\"\n current_prop = state.train_dataloader.dataset.proportion\n losses = []\n for domain in self.set_names:\n losses.append(state.eval_metrics[\"eval\"][f\"{domain}_LanguageCrossEntropy\"].compute().item())\n new_proportion = self.update_proportion(current_prop, losses)\n state.train_dataloader.dataset.update_proportion(new_proportion)\n \n def state_dict(self) -> Dict[str, Any]:\n \"\"\" Save the used domain ids after each epoch, for resuming training from a previous checkpoint to make sure that used samples are not used again \"\"\"\n return {\"used_domain_ids\": self.used_domain_ids}\n \n def load_state_dict(self, state_dict: Dict[str, Any]) -> None:\n \"\"\" Load the used domain ids \"\"\"\n self.used_domain_ids = state_dict[\"used_domain_ids\"]"
},
{
"identifier": "PruningCallback",
"path": "llmshearing/callbacks/pruning_callback.py",
"snippet": "class PruningCallback(Callback):\n \"\"\"\n The interplay of pruning and the main training process is implemented fully based on the callback mechanism.\n \"\"\"\n def __init__(self, save_folder: str = None) -> None:\n self.save_folder = save_folder\n\n def plug_in_pruned_steps(self, state: State, logger: Logger):\n \"\"\" Hack: Add pruned_steps to the batch to calculate target sparsity during the pruning warmup stage \"\"\"\n if getattr(state.model.model, \"l0_module\", None) is not None:\n input_ids = state.batch[\"input_ids\"]\n state.batch[\"pruned_steps\"] = torch.LongTensor([state.timestamp.batch.value] * len(input_ids)).to(input_ids.device)\n \n def batch_start(self, state: State, logger: Logger):\n self.plug_in_pruned_steps(state, logger)\n \n def eval_batch_start(self, state: State, logger: Logger):\n self.plug_in_pruned_steps(state, logger)\n \n def after_train_batch(self, state: State, logger: Logger) -> None:\n \"\"\" Log information from the L0 module after each training batch \"\"\"\n l0_output = state.outputs[\"l0_output\"]\n if l0_output is not None:\n logger.log_metrics({f'metrics/train/{name}': val.cpu().item() if torch.is_tensor(val) else val for (name, val) in l0_output[1].items()})\n \n def eval_end(self, state: State, logger: Logger) -> None:\n \"\"\" Save the deterministic masks after each evaluation for analysis \"\"\"\n zs = state.outputs[\"zs\"]\n zs = {key: zs[key].detach().float().cpu().numpy() for key in zs}\n step = state.timestamp.batch.value\n torch.save(zs, os.path.join(self.save_folder.replace(\"{run_name}\", state.run_name), f\"zs_s{step}.pt\"))"
},
{
"identifier": "build_text_dataloader",
"path": "llmshearing/datasets/load_text_dataloader.py",
"snippet": "def build_text_dataloader(cfg: DictConfig, device_batch_size: int, dynamic: bool = False, \n set_names: str = None, proportion: List[float] = None) -> DataLoader:\n \"\"\"Builds a text dataloader.\n\n Args:\n cfg (DictConfig): Configuration dictionary.\n device_batch_size (int): Batch size for one single device.\n dynamic (bool, optional): Whether to use dynamic streaming dataset to load data from each \n domain dynamically. Defaults to False.\n set_names (str, optional): Name of the dataset. Defaults to None.\n proportion (List[float], optional): Initial proportion of each domain in the dataset. Defaults to None.\n\n Returns:\n DataLoader: A PyTorch DataLoader object.\n \"\"\"\n \n if dynamic:\n dataset = TextDynamicStreamingDataset(local=cfg.dataset.local,\n max_seq_len=cfg.dataset.max_seq_len,\n batch_size=device_batch_size,\n shuffle=cfg.dataset.get(\n 'shuffle', False),\n shuffle_seed=cfg.dataset.get(\n 'shuffle_seed', 9176),\n num_canonical_nodes=cfg.dataset.get(\n 'num_canonical_nodes', 128),\n proportion=proportion,\n set_names=set_names,\n is_uint16=cfg.dataset.get(\"is_uint16\", False))\n else:\n dataset = TextStreamingDataset(\n local=cfg.dataset.local,\n max_seq_len=cfg.dataset.max_seq_len,\n split=cfg.dataset.get('split', None),\n shuffle=cfg.dataset.get('shuffle', False),\n shuffle_seed=cfg.dataset.get('shuffle_seed', 9176),\n num_canonical_nodes=cfg.dataset.get(\n 'num_canonical_nodes', 128),\n batch_size=device_batch_size,\n is_uint16=cfg.dataset.get(\"is_uint16\", False))\n\n tokenizer = AutoTokenizer.from_pretrained(cfg.dataset.tokenizer_name)\n if isinstance(dataset[0], Mapping) and \"set\" in dataset[0]:\n COLLATE_FN = DataCollatorForLMWithSetName\n collate_fn = COLLATE_FN(\n set_names=set_names,\n tokenizer=tokenizer,\n mlm=False)\n else:\n COLLATE_FN = transformers.DataCollatorForLanguageModeling\n collate_fn = COLLATE_FN(\n tokenizer=tokenizer,\n mlm=False,\n )\n \n return DataLoader(\n dataset,\n collate_fn=collate_fn,\n batch_size=device_batch_size,\n drop_last=cfg.drop_last,\n num_workers=cfg.num_workers,\n pin_memory=cfg.get('pin_memory', True),\n prefetch_factor=cfg.get('prefetch_factor', 2),\n persistent_workers=cfg.get('persistent_workers', True),\n timeout=cfg.get('timeout', 0),\n )"
},
{
"identifier": "COMPOSER_MODEL_REGISTRY",
"path": "llmshearing/models/model_registry.py",
"snippet": "COMPOSER_MODEL_REGISTRY = {\n 'mosaic_llama_125m': ComposerMosaicLlama,\n 'mosaic_llama_370m': ComposerMosaicLlama,\n \"mosaic_llama_1.3b\": ComposerMosaicLlama,\n \"mosaic_llama_3b\": ComposerMosaicLlama,\n 'mosaic_llama_7b': ComposerMosaicLlama,\n 'mosaic_llama_13b': ComposerMosaicLlama,\n 'mosaic_llama_30b': ComposerMosaicLlama,\n 'mosaic_llama_65b': ComposerMosaicLlama,\n 'mosaic_pythia_70m': ComposerMosaicPythia,\n 'mosaic_pythia_160m': ComposerMosaicPythia,\n 'mosaic_pythia_410m': ComposerMosaicPythia,\n 'mosaic_pythia_1.4b': ComposerMosaicPythia,\n 'mosaic_llama2_370m': ComposerMosaicLlama,\n \"mosaic_llama2_1.3b\": ComposerMosaicLlama,\n \"mosaic_llama2_3b\": ComposerMosaicLlama,\n 'mosaic_llama2_7b': ComposerMosaicLlama,\n 'mosaic_llama2_13b': ComposerMosaicLlama,\n 'mosaic_together_3b': ComposerMosaicPythia \n}"
}
] | import os
import sys
import warnings
import torch
from types import MethodType
from typing import Any, Dict
from composer import Logger, State, Trainer
from composer.callbacks.checkpoint_saver import CheckpointSaver
from composer.core import Evaluator, Event
from composer.loggers import FileLogger
from composer.optim import DecoupledAdamW
from composer.utils import dist, get_device, reproducibility
from llmfoundry.optim import (DecoupledAdaLRLion, DecoupledClipLion,
DecoupledLionW, DecoupledLionW_8bit)
from llmfoundry.utils.builders import (build_algorithm, build_callback,
build_logger, build_scheduler)
from llmfoundry.utils.config_utils import (log_config, pop_config,
update_batch_size_info)
from omegaconf import DictConfig
from omegaconf import OmegaConf as om
from torch import nn
from torch.optim.optimizer import Optimizer
from llmshearing.callbacks.callbacks import DebugCallback
from llmshearing.callbacks.dynamic_loading_callback import \
DynamicLoadingCallback
from llmshearing.callbacks.pruning_callback import PruningCallback
from llmshearing.datasets.load_text_dataloader import build_text_dataloader
from llmshearing.models.model_registry import COMPOSER_MODEL_REGISTRY
from llmshearing.datasets.state import _dataset_state_dict | 4,340 | optimizer_config: Dict[str, Any]) -> Optimizer:
"""
build optimizer that consists of three groups of parameters:
- main_model_params: parameters of the main model
- l0_module_params: parameters of the l0 module
- lagrange_params: parameters of the lagrange multipliers
"""
param_groups = {}
main_model_params = [p for n, p in model.named_parameters() if "l0_module" not in n]
l0_module_params = [p for n, p in model.named_parameters() if "l0_module" in n and "lambda" not in n]
lagrange_params = [p for n, p in model.named_parameters() if "l0_module" in n and "lambda" in n]
param_groups = [{"params": main_model_params, "lr": optimizer_config.lr}]
lag_lr = pop_config(optimizer_config, "lag_lr")
if len(l0_module_params) > 0:
param_groups.extend([{"params": l0_module_params, "lr": lag_lr}, {"params": lagrange_params, "lr": -(lag_lr)}])
for i, group in enumerate(param_groups):
print(f"Group {i}:", f"{len(group['params'])} tensors", f"{sum(p.numel() for p in group['params'])} params", f"{group['lr']:.2e} lr")
if name == 'decoupled_adamw':
return DecoupledAdamW(param_groups, **optimizer_config)
elif name == 'decoupled_lionw':
return DecoupledLionW(param_groups, **optimizer_config)
elif name == 'clip_lion':
return DecoupledClipLion(param_groups, **optimizer_config)
elif name == 'adalr_lion':
return DecoupledAdaLRLion(param_groups, **optimizer_config)
elif name == 'decoupled_lionw_8b':
return DecoupledLionW_8bit(param_groups, **optimizer_config)
else:
raise ValueError(f'Not sure how to build optimizer: {name}')
def main(cfg):
""" Main training function """
print("Start running ")
warnings.filterwarnings(
action='ignore',
category=UserWarning,
message=f'torch.distributed.*_base is a private function and will be deprecated.*'
)
cfg.dist_timeout = cfg.get('dist_timeout', 1800.0)
dist.initialize_dist(get_device(None), timeout=cfg.dist_timeout)
# Check for incompatibilities between the model and data loaders
validate_config(cfg)
# Filter deprecation warning from torch internal usage
warnings.filterwarnings(
action='ignore',
category=UserWarning,
message='torch.distributed.*_base is a private function and will be deprecated.*'
)
reproducibility.seed_all(cfg.seed)
# Run Name
if cfg.get('run_name') is None:
cfg.run_name = os.environ.get('COMPOSER_RUN_NAME', 'llm')
# Get batch size info
cfg = update_batch_size_info(cfg)
# Read FSDP Config as a dict
fsdp_config = cfg.get('fsdp_config', None)
fsdp_config = om.to_container(fsdp_config,
resolve=True) if fsdp_config else None
# Restrict model init_device to 'meta' and 'cpu',
# when multiple GPUs are available.
# Also 'meta' is only valid when using FSDP
init_device = cfg.model.get('init_device', 'cpu')
assert init_device in ['meta', 'cpu']
if fsdp_config is None and init_device == 'meta':
warnings.warn(
"Using `cfg.model.init_device='meta'` is only valid when using FSDP! " +\
"Reverting to `cfg.model.init_device='cpu'`.")
cfg.model.init_device = 'cpu'
# Loggers
loggers = [
build_logger(name, logger_cfg)
for name, logger_cfg in (cfg.get('loggers') or {}).items()
]
save_folder = cfg.save_folder.replace('{run_name}', cfg.run_name)
filename = f"{save_folder}/logs.txt"
count = 1
while os.path.exists(filename):
print(f"File {filename} already exists")
filename = f"{save_folder}/logs_{count}.txt"
count += 1
print(f"Logging to {filename}")
loggers.append(FileLogger(filename=filename,
buffer_size=1,
flush_interval=50))
# Build Model
print('Initializing model...')
if cfg.callbacks.data_loading.dynamic:
cfg.model.set_names = cfg.callbacks.data_loading.set_names
model = build_composer_model(cfg.model)
print(model)
print(cfg.model.l0_module)
state_dict = load_weights(cfg)
if state_dict is not None:
load_state_dict(model, state_dict)
cfg.n_params = sum(p.numel() for p in model.parameters())
print(f'{cfg.n_params=:.2e}')
if hasattr(model, 'num_fwd_flops'):
print(f'{model.num_fwd_flops=:.2e}')
# set names has to be part of the config
assert getattr(cfg.callbacks.data_loading, 'set_names', None) is not None, "please specify the set (domain) names in the config"
# Dataloaders
print('Building train loader...')
| # Copyright 2022 MosaicML Examples authors
# SPDX-License-Identifier: Apache-2.0
def is_one_hour(run_name: str):
""" Check if the run name is for one hour training. """
return run_name.startswith("ONE_HOUR")
def exit_batch_checkpoint(self, state: State, logger: Logger):
""" Exit the program after saving the checkpoint. """
if self.save_interval(state, Event.BATCH_CHECKPOINT) and self.last_checkpoint_batch != state.timestamp.batch:
self._save_checkpoint(
state,
logger,
)
print("Ending program at batch", state.timestamp.batch)
print(self.folder)
sys.exit()
def validate_config(cfg: DictConfig):
"""Validates compatible model and dataloader selection."""
loaders = [cfg.train_loader]
if 'eval_loader' in cfg:
loaders.append(cfg.eval_loader)
def build_composer_model(cfg: DictConfig):
""" build the composer model """
warnings.filterwarnings(
action='ignore',
message='Torchmetrics v0.9 introduced a new argument class property')
return COMPOSER_MODEL_REGISTRY[cfg.name](cfg)
def load_weights(cfg: DictConfig):
""" load weights """
if cfg.model.get('path', None):
state_dict = torch.load(cfg.model.path) # for loading pre-trained llama
if "state" in state_dict:
state_dict = state_dict["state"]["model"]
print("Loaded model from path: ", cfg.model.path)
return state_dict
return None
def load_state_dict(model: nn.Module, state_dict: Dict[str, Any]):
""" load state dict to the model """
result = model.load_state_dict(state_dict, strict=False)
print("Model load state dict result: ", result)
print("Having missing rotary_emb.inv_freq keys is normal")
def build_optimizer(model: torch.nn.Module, name: str,
optimizer_config: Dict[str, Any]) -> Optimizer:
"""
build optimizer that consists of three groups of parameters:
- main_model_params: parameters of the main model
- l0_module_params: parameters of the l0 module
- lagrange_params: parameters of the lagrange multipliers
"""
param_groups = {}
main_model_params = [p for n, p in model.named_parameters() if "l0_module" not in n]
l0_module_params = [p for n, p in model.named_parameters() if "l0_module" in n and "lambda" not in n]
lagrange_params = [p for n, p in model.named_parameters() if "l0_module" in n and "lambda" in n]
param_groups = [{"params": main_model_params, "lr": optimizer_config.lr}]
lag_lr = pop_config(optimizer_config, "lag_lr")
if len(l0_module_params) > 0:
param_groups.extend([{"params": l0_module_params, "lr": lag_lr}, {"params": lagrange_params, "lr": -(lag_lr)}])
for i, group in enumerate(param_groups):
print(f"Group {i}:", f"{len(group['params'])} tensors", f"{sum(p.numel() for p in group['params'])} params", f"{group['lr']:.2e} lr")
if name == 'decoupled_adamw':
return DecoupledAdamW(param_groups, **optimizer_config)
elif name == 'decoupled_lionw':
return DecoupledLionW(param_groups, **optimizer_config)
elif name == 'clip_lion':
return DecoupledClipLion(param_groups, **optimizer_config)
elif name == 'adalr_lion':
return DecoupledAdaLRLion(param_groups, **optimizer_config)
elif name == 'decoupled_lionw_8b':
return DecoupledLionW_8bit(param_groups, **optimizer_config)
else:
raise ValueError(f'Not sure how to build optimizer: {name}')
def main(cfg):
""" Main training function """
print("Start running ")
warnings.filterwarnings(
action='ignore',
category=UserWarning,
message=f'torch.distributed.*_base is a private function and will be deprecated.*'
)
cfg.dist_timeout = cfg.get('dist_timeout', 1800.0)
dist.initialize_dist(get_device(None), timeout=cfg.dist_timeout)
# Check for incompatibilities between the model and data loaders
validate_config(cfg)
# Filter deprecation warning from torch internal usage
warnings.filterwarnings(
action='ignore',
category=UserWarning,
message='torch.distributed.*_base is a private function and will be deprecated.*'
)
reproducibility.seed_all(cfg.seed)
# Run Name
if cfg.get('run_name') is None:
cfg.run_name = os.environ.get('COMPOSER_RUN_NAME', 'llm')
# Get batch size info
cfg = update_batch_size_info(cfg)
# Read FSDP Config as a dict
fsdp_config = cfg.get('fsdp_config', None)
fsdp_config = om.to_container(fsdp_config,
resolve=True) if fsdp_config else None
# Restrict model init_device to 'meta' and 'cpu',
# when multiple GPUs are available.
# Also 'meta' is only valid when using FSDP
init_device = cfg.model.get('init_device', 'cpu')
assert init_device in ['meta', 'cpu']
if fsdp_config is None and init_device == 'meta':
warnings.warn(
"Using `cfg.model.init_device='meta'` is only valid when using FSDP! " +\
"Reverting to `cfg.model.init_device='cpu'`.")
cfg.model.init_device = 'cpu'
# Loggers
loggers = [
build_logger(name, logger_cfg)
for name, logger_cfg in (cfg.get('loggers') or {}).items()
]
save_folder = cfg.save_folder.replace('{run_name}', cfg.run_name)
filename = f"{save_folder}/logs.txt"
count = 1
while os.path.exists(filename):
print(f"File {filename} already exists")
filename = f"{save_folder}/logs_{count}.txt"
count += 1
print(f"Logging to {filename}")
loggers.append(FileLogger(filename=filename,
buffer_size=1,
flush_interval=50))
# Build Model
print('Initializing model...')
if cfg.callbacks.data_loading.dynamic:
cfg.model.set_names = cfg.callbacks.data_loading.set_names
model = build_composer_model(cfg.model)
print(model)
print(cfg.model.l0_module)
state_dict = load_weights(cfg)
if state_dict is not None:
load_state_dict(model, state_dict)
cfg.n_params = sum(p.numel() for p in model.parameters())
print(f'{cfg.n_params=:.2e}')
if hasattr(model, 'num_fwd_flops'):
print(f'{model.num_fwd_flops=:.2e}')
# set names has to be part of the config
assert getattr(cfg.callbacks.data_loading, 'set_names', None) is not None, "please specify the set (domain) names in the config"
# Dataloaders
print('Building train loader...') | train_loader = build_text_dataloader(cfg.train_loader, | 3 | 2023-10-16 12:26:08+00:00 | 8k |
hugoycj/Instant-angelo | systems/base.py | [
{
"identifier": "parse_optimizer",
"path": "systems/utils.py",
"snippet": "def parse_optimizer(config, model):\n if hasattr(config, 'params'):\n params = [{'params': get_parameters(model, name), 'name': name, **args} for name, args in config.params.items()]\n rank_zero_debug('Specify optimizer params:', config.params)\n else:\n params = model.parameters()\n if config.name in ['FusedAdam']:\n import apex\n optim = getattr(apex.optimizers, config.name)(params, **config.args)\n else:\n optim = getattr(torch.optim, config.name)(params, **config.args)\n return optim"
},
{
"identifier": "parse_scheduler",
"path": "systems/utils.py",
"snippet": "def parse_scheduler(config, optimizer):\n interval = config.get('interval', 'epoch')\n assert interval in ['epoch', 'step']\n if config.name == 'SequentialLR':\n scheduler = {\n 'scheduler': SequentialLR(optimizer, [parse_scheduler(conf, optimizer)['scheduler'] for conf in config.schedulers], milestones=config.milestones),\n 'interval': interval\n }\n elif config.name == 'Chained':\n scheduler = {\n 'scheduler': ChainedScheduler([parse_scheduler(conf, optimizer)['scheduler'] for conf in config.schedulers]),\n 'interval': interval\n }\n else:\n scheduler = {\n 'scheduler': get_scheduler(config.name)(optimizer, **config.args),\n 'interval': interval\n }\n return scheduler"
},
{
"identifier": "update_module_step",
"path": "systems/utils.py",
"snippet": "def update_module_step(m, epoch, global_step):\n if hasattr(m, 'update_step'):\n m.update_step(epoch, global_step)"
},
{
"identifier": "SaverMixin",
"path": "utils/mixins.py",
"snippet": "class SaverMixin():\n @property\n def save_dir(self):\n return self.config.save_dir\n \n def convert_data(self, data):\n if isinstance(data, np.ndarray):\n return data\n elif isinstance(data, torch.Tensor):\n return data.cpu().numpy()\n elif isinstance(data, list):\n return [self.convert_data(d) for d in data]\n elif isinstance(data, dict):\n return {k: self.convert_data(v) for k, v in data.items()}\n else:\n raise TypeError('Data must be in type numpy.ndarray, torch.Tensor, list or dict, getting', type(data))\n \n def get_save_path(self, filename):\n save_path = os.path.join(self.save_dir, filename)\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n return save_path\n \n DEFAULT_RGB_KWARGS = {'data_format': 'CHW', 'data_range': (0, 1)}\n DEFAULT_UV_KWARGS = {'data_format': 'CHW', 'data_range': (0, 1), 'cmap': 'checkerboard'}\n DEFAULT_GRAYSCALE_KWARGS = {'data_range': None, 'cmap': 'jet'}\n\n def get_rgb_image_(self, img, data_format, data_range):\n img = self.convert_data(img)\n assert data_format in ['CHW', 'HWC']\n if data_format == 'CHW':\n img = img.transpose(1, 2, 0)\n img = img.clip(min=data_range[0], max=data_range[1])\n img = ((img - data_range[0]) / (data_range[1] - data_range[0]) * 255.).astype(np.uint8)\n imgs = [img[...,start:start+3] for start in range(0, img.shape[-1], 3)]\n imgs = [img_ if img_.shape[-1] == 3 else np.concatenate([img_, np.zeros((img_.shape[0], img_.shape[1], 3 - img_.shape[2]), dtype=img_.dtype)], axis=-1) for img_ in imgs]\n img = np.concatenate(imgs, axis=1)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img\n \n def save_rgb_image(self, filename, img, data_format=DEFAULT_RGB_KWARGS['data_format'], data_range=DEFAULT_RGB_KWARGS['data_range']):\n img = self.get_rgb_image_(img, data_format, data_range)\n cv2.imwrite(self.get_save_path(filename), img)\n \n def get_uv_image_(self, img, data_format, data_range, cmap):\n img = self.convert_data(img)\n assert data_format in ['CHW', 'HWC']\n if data_format == 'CHW':\n img = img.transpose(1, 2, 0)\n img = img.clip(min=data_range[0], max=data_range[1])\n img = (img - data_range[0]) / (data_range[1] - data_range[0])\n assert cmap in ['checkerboard', 'color']\n if cmap == 'checkerboard':\n n_grid = 64\n mask = (img * n_grid).astype(int)\n mask = (mask[...,0] + mask[...,1]) % 2 == 0\n img = np.ones((img.shape[0], img.shape[1], 3), dtype=np.uint8) * 255\n img[mask] = np.array([255, 0, 255], dtype=np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif cmap == 'color':\n img_ = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n img_[..., 0] = (img[..., 0] * 255).astype(np.uint8)\n img_[..., 1] = (img[..., 1] * 255).astype(np.uint8)\n img_ = cv2.cvtColor(img_, cv2.COLOR_RGB2BGR)\n img = img_\n return img\n \n def save_uv_image(self, filename, img, data_format=DEFAULT_UV_KWARGS['data_format'], data_range=DEFAULT_UV_KWARGS['data_range'], cmap=DEFAULT_UV_KWARGS['cmap']):\n img = self.get_uv_image_(img, data_format, data_range, cmap)\n cv2.imwrite(self.get_save_path(filename), img)\n\n def get_grayscale_image_(self, img, data_range, cmap):\n img = self.convert_data(img)\n img = np.nan_to_num(img)\n if data_range is None:\n img = (img - img.min()) / (img.max() - img.min())\n else:\n img = img.clip(data_range[0], data_range[1])\n img = (img - data_range[0]) / (data_range[1] - data_range[0])\n assert cmap in [None, 'jet', 'magma']\n if cmap == None:\n img = (img * 255.).astype(np.uint8)\n img = np.repeat(img[...,None], 3, axis=2)\n elif cmap == 'jet':\n img = (img * 255.).astype(np.uint8)\n img = cv2.applyColorMap(img, cv2.COLORMAP_JET)\n elif cmap == 'magma':\n img = 1. - img\n base = cm.get_cmap('magma')\n num_bins = 256\n colormap = LinearSegmentedColormap.from_list(\n f\"{base.name}{num_bins}\",\n base(np.linspace(0, 1, num_bins)),\n num_bins\n )(np.linspace(0, 1, num_bins))[:,:3]\n a = np.floor(img * 255.)\n b = (a + 1).clip(max=255.)\n f = img * 255. - a\n a = a.astype(np.uint16).clip(0, 255)\n b = b.astype(np.uint16).clip(0, 255)\n img = colormap[a] + (colormap[b] - colormap[a]) * f[...,None]\n img = (img * 255.).astype(np.uint8)\n return img\n\n def save_grayscale_image(self, filename, img, data_range=DEFAULT_GRAYSCALE_KWARGS['data_range'], cmap=DEFAULT_GRAYSCALE_KWARGS['cmap']):\n img = self.get_grayscale_image_(img, data_range, cmap)\n cv2.imwrite(self.get_save_path(filename), img)\n\n def get_image_grid_(self, imgs):\n if isinstance(imgs[0], list):\n return np.concatenate([self.get_image_grid_(row) for row in imgs], axis=0)\n cols = []\n for col in imgs:\n assert col['type'] in ['rgb', 'uv', 'grayscale']\n if col['type'] == 'rgb':\n rgb_kwargs = self.DEFAULT_RGB_KWARGS.copy()\n rgb_kwargs.update(col['kwargs'])\n cols.append(self.get_rgb_image_(col['img'], **rgb_kwargs))\n elif col['type'] == 'uv':\n uv_kwargs = self.DEFAULT_UV_KWARGS.copy()\n uv_kwargs.update(col['kwargs'])\n cols.append(self.get_uv_image_(col['img'], **uv_kwargs))\n elif col['type'] == 'grayscale':\n grayscale_kwargs = self.DEFAULT_GRAYSCALE_KWARGS.copy()\n grayscale_kwargs.update(col['kwargs'])\n cols.append(self.get_grayscale_image_(col['img'], **grayscale_kwargs))\n return np.concatenate(cols, axis=1)\n \n def save_image_grid(self, filename, imgs):\n img = self.get_image_grid_(imgs)\n cv2.imwrite(self.get_save_path(filename), img)\n \n def save_image(self, filename, img):\n img = self.convert_data(img)\n assert img.dtype == np.uint8\n if img.shape[-1] == 3:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif img.shape[-1] == 4:\n img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGRA)\n cv2.imwrite(self.get_save_path(filename), img)\n \n def save_cubemap(self, filename, img, data_range=(0, 1)):\n img = self.convert_data(img)\n assert img.ndim == 4 and img.shape[0] == 6 and img.shape[1] == img.shape[2]\n\n imgs_full = []\n for start in range(0, img.shape[-1], 3):\n img_ = img[...,start:start+3]\n img_ = np.stack([self.get_rgb_image_(img_[i], 'HWC', data_range) for i in range(img_.shape[0])], axis=0)\n size = img_.shape[1]\n placeholder = np.zeros((size, size, 3), dtype=np.float32)\n img_full = np.concatenate([\n np.concatenate([placeholder, img_[2], placeholder, placeholder], axis=1),\n np.concatenate([img_[1], img_[4], img_[0], img_[5]], axis=1),\n np.concatenate([placeholder, img_[3], placeholder, placeholder], axis=1)\n ], axis=0)\n img_full = cv2.cvtColor(img_full, cv2.COLOR_RGB2BGR)\n imgs_full.append(img_full)\n \n imgs_full = np.concatenate(imgs_full, axis=1)\n cv2.imwrite(self.get_save_path(filename), imgs_full)\n\n def save_data(self, filename, data):\n data = self.convert_data(data)\n if isinstance(data, dict):\n if not filename.endswith('.npz'):\n filename += '.npz'\n np.savez(self.get_save_path(filename), **data)\n else:\n if not filename.endswith('.npy'):\n filename += '.npy'\n np.save(self.get_save_path(filename), data)\n \n def save_state_dict(self, filename, data):\n torch.save(data, self.get_save_path(filename))\n \n def save_img_sequence(self, filename, img_dir, matcher, save_format='gif', fps=30):\n assert save_format in ['gif', 'mp4']\n if not filename.endswith(save_format):\n filename += f\".{save_format}\"\n matcher = re.compile(matcher)\n img_dir = os.path.join(self.save_dir, img_dir)\n imgs = []\n for f in os.listdir(img_dir):\n if matcher.search(f):\n imgs.append(f)\n imgs = sorted(imgs, key=lambda f: int(matcher.search(f).groups()[0]))\n imgs = [cv2.imread(os.path.join(img_dir, f)) for f in imgs]\n \n if save_format == 'gif':\n imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]\n imageio.mimsave(self.get_save_path(filename), imgs, fps=fps, palettesize=256)\n elif save_format == 'mp4':\n imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]\n imageio.mimsave(self.get_save_path(filename), imgs, fps=fps)\n \n def save_mesh(self, filename, v_pos, t_pos_idx, v_tex=None, t_tex_idx=None, v_rgb=None, v_norm=None):\n v_pos, t_pos_idx = self.convert_data(v_pos), self.convert_data(t_pos_idx)\n if v_rgb is not None:\n v_rgb = self.convert_data(v_rgb)\n if v_norm is not None:\n v_norm = self.convert_data(v_rgb)\n import trimesh\n mesh = trimesh.Trimesh(\n vertices=v_pos,\n faces=t_pos_idx,\n vertex_colors=v_rgb,\n vertex_normals=v_norm\n )\n mesh.export(self.get_save_path(filename))\n \n def save_file(self, filename, src_path):\n shutil.copyfile(src_path, self.get_save_path(filename))\n \n def save_json(self, filename, payload):\n with open(self.get_save_path(filename), 'w') as f:\n f.write(json.dumps(payload))"
},
{
"identifier": "config_to_primitive",
"path": "utils/misc.py",
"snippet": "def config_to_primitive(config, resolve=True):\n return OmegaConf.to_container(config, resolve=resolve)"
},
{
"identifier": "get_rank",
"path": "utils/misc.py",
"snippet": "def get_rank():\n # SLURM_PROCID can be set even if SLURM is not managing the multiprocessing,\n # therefore LOCAL_RANK needs to be checked first\n rank_keys = (\"RANK\", \"LOCAL_RANK\", \"SLURM_PROCID\", \"JSM_NAMESPACE_RANK\")\n for key in rank_keys:\n rank = os.environ.get(key)\n if rank is not None:\n return int(rank)\n return 0"
}
] | import pytorch_lightning as pl
import models
from systems.utils import parse_optimizer, parse_scheduler, update_module_step
from utils.mixins import SaverMixin
from utils.misc import config_to_primitive, get_rank | 4,188 |
class BaseSystem(pl.LightningModule, SaverMixin):
"""
Two ways to print to console:
1. self.print: correctly handle progress bar
2. rank_zero_info: use the logging module
"""
def __init__(self, config):
super().__init__()
self.config = config
self.rank = get_rank()
self.prepare()
self.model = models.make(self.config.model.name, self.config.model)
def prepare(self):
pass
def forward(self, batch):
raise NotImplementedError
def C(self, value):
if isinstance(value, int) or isinstance(value, float):
pass
else:
value = config_to_primitive(value)
if not isinstance(value, list):
raise TypeError('Scalar specification only supports list, got', type(value))
if len(value) == 3:
value = [0] + value
assert len(value) == 4
start_step, start_value, end_value, end_step = value
if isinstance(end_step, int):
current_step = self.global_step
value = start_value + (end_value - start_value) * max(min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0)
elif isinstance(end_step, float):
current_step = self.current_epoch
value = start_value + (end_value - start_value) * max(min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0)
return value
def preprocess_data(self, batch, stage):
pass
"""
Implementing on_after_batch_transfer of DataModule does the same.
But on_after_batch_transfer does not support DP.
"""
def on_train_batch_start(self, batch, batch_idx, unused=0):
self.dataset = self.trainer.datamodule.train_dataloader().dataset
self.preprocess_data(batch, 'train')
update_module_step(self.model, self.current_epoch, self.global_step)
def on_validation_batch_start(self, batch, batch_idx, dataloader_idx):
self.dataset = self.trainer.datamodule.val_dataloader().dataset
self.preprocess_data(batch, 'validation')
update_module_step(self.model, self.current_epoch, self.global_step)
def on_test_batch_start(self, batch, batch_idx, dataloader_idx):
self.dataset = self.trainer.datamodule.test_dataloader().dataset
self.preprocess_data(batch, 'test')
update_module_step(self.model, self.current_epoch, self.global_step)
def on_predict_batch_start(self, batch, batch_idx, dataloader_idx):
self.dataset = self.trainer.datamodule.predict_dataloader().dataset
self.preprocess_data(batch, 'predict')
update_module_step(self.model, self.current_epoch, self.global_step)
def training_step(self, batch, batch_idx):
raise NotImplementedError
"""
# aggregate outputs from different devices (DP)
def training_step_end(self, out):
pass
"""
"""
# aggregate outputs from different iterations
def training_epoch_end(self, out):
pass
"""
def validation_step(self, batch, batch_idx):
raise NotImplementedError
"""
# aggregate outputs from different devices when using DP
def validation_step_end(self, out):
pass
"""
def validation_epoch_end(self, out):
"""
Gather metrics from all devices, compute mean.
Purge repeated results using data index.
"""
raise NotImplementedError
def test_step(self, batch, batch_idx):
raise NotImplementedError
def test_epoch_end(self, out):
"""
Gather metrics from all devices, compute mean.
Purge repeated results using data index.
"""
raise NotImplementedError
def export(self):
raise NotImplementedError
def configure_optimizers(self):
|
class BaseSystem(pl.LightningModule, SaverMixin):
"""
Two ways to print to console:
1. self.print: correctly handle progress bar
2. rank_zero_info: use the logging module
"""
def __init__(self, config):
super().__init__()
self.config = config
self.rank = get_rank()
self.prepare()
self.model = models.make(self.config.model.name, self.config.model)
def prepare(self):
pass
def forward(self, batch):
raise NotImplementedError
def C(self, value):
if isinstance(value, int) or isinstance(value, float):
pass
else:
value = config_to_primitive(value)
if not isinstance(value, list):
raise TypeError('Scalar specification only supports list, got', type(value))
if len(value) == 3:
value = [0] + value
assert len(value) == 4
start_step, start_value, end_value, end_step = value
if isinstance(end_step, int):
current_step = self.global_step
value = start_value + (end_value - start_value) * max(min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0)
elif isinstance(end_step, float):
current_step = self.current_epoch
value = start_value + (end_value - start_value) * max(min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0)
return value
def preprocess_data(self, batch, stage):
pass
"""
Implementing on_after_batch_transfer of DataModule does the same.
But on_after_batch_transfer does not support DP.
"""
def on_train_batch_start(self, batch, batch_idx, unused=0):
self.dataset = self.trainer.datamodule.train_dataloader().dataset
self.preprocess_data(batch, 'train')
update_module_step(self.model, self.current_epoch, self.global_step)
def on_validation_batch_start(self, batch, batch_idx, dataloader_idx):
self.dataset = self.trainer.datamodule.val_dataloader().dataset
self.preprocess_data(batch, 'validation')
update_module_step(self.model, self.current_epoch, self.global_step)
def on_test_batch_start(self, batch, batch_idx, dataloader_idx):
self.dataset = self.trainer.datamodule.test_dataloader().dataset
self.preprocess_data(batch, 'test')
update_module_step(self.model, self.current_epoch, self.global_step)
def on_predict_batch_start(self, batch, batch_idx, dataloader_idx):
self.dataset = self.trainer.datamodule.predict_dataloader().dataset
self.preprocess_data(batch, 'predict')
update_module_step(self.model, self.current_epoch, self.global_step)
def training_step(self, batch, batch_idx):
raise NotImplementedError
"""
# aggregate outputs from different devices (DP)
def training_step_end(self, out):
pass
"""
"""
# aggregate outputs from different iterations
def training_epoch_end(self, out):
pass
"""
def validation_step(self, batch, batch_idx):
raise NotImplementedError
"""
# aggregate outputs from different devices when using DP
def validation_step_end(self, out):
pass
"""
def validation_epoch_end(self, out):
"""
Gather metrics from all devices, compute mean.
Purge repeated results using data index.
"""
raise NotImplementedError
def test_step(self, batch, batch_idx):
raise NotImplementedError
def test_epoch_end(self, out):
"""
Gather metrics from all devices, compute mean.
Purge repeated results using data index.
"""
raise NotImplementedError
def export(self):
raise NotImplementedError
def configure_optimizers(self): | optim = parse_optimizer(self.config.system.optimizer, self.model) | 0 | 2023-10-22 02:53:17+00:00 | 8k |
HKUDS/GraphGPT | graphgpt/train/train_light.py | [
{
"identifier": "GraphChatTrainer",
"path": "graphgpt/train/graphchat_trainer.py",
"snippet": "class GraphChatTrainer(Trainer):\n\n def _save(self, output_dir: Optional[str] = None, state_dict=None):\n if getattr(self.args, 'tune_graph_mlp_adapter', False):\n # Save the model\n _state_dict = state_dict\n if _state_dict is None:\n # Only save the model itself if we are using distributed training\n model_to_save = unwrap_model(self.model)\n _state_dict = model_to_save.state_dict()\n\n weight_to_save = {}\n keys_to_match = ['graph_projector', 'embed_tokens', 'embed_in']\n for k, v in _state_dict.items():\n if any(key_match in k for key_match in keys_to_match):\n weight_to_save[k] = v\n\n current_folder = output_dir.split('/')[-1]\n parent_folder = os.path.dirname(output_dir)\n if current_folder.startswith('checkpoint-'):\n mm_projector_folder = os.path.join(parent_folder, \"graph_projector\")\n os.makedirs(mm_projector_folder, exist_ok=True)\n torch.save(weight_to_save, os.path.join(mm_projector_folder, f'{current_folder}.bin'))\n else:\n torch.save(weight_to_save, os.path.join(output_dir, f'graph_projector.bin'))\n\n super(GraphChatTrainer, self)._save(output_dir, state_dict)"
},
{
"identifier": "conversation",
"path": "graphgpt/conversation.py",
"snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n def get_prompt(self):\n def append_message(self, role, message):\n def get_images(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def to_gradio_chatbot(self):\n def copy(self):\n def dict(self):"
},
{
"identifier": "GraphGPT_pl",
"path": "graphgpt/model/GraphLlama_pl.py",
"snippet": "class GraphGPT_pl(LightningModule): \n def __init__(self,\n training_args, model_args, data_args, tokenizer, \n **kwargs,\n ):\n super().__init__()\n self.training_args = training_args\n self.model_args = model_args\n self.data_args = data_args\n compute_dtype = (torch.float16 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32))\n\n bnb_model_from_pretrained_args = {}\n\n ## load 4 8 bit \n if training_args.bits in [4, 8]:\n from transformers import BitsAndBytesConfig\n from peft import prepare_model_for_int8_training\n bnb_model_from_pretrained_args.update(dict(\n device_map={\"\": training_args.device},\n load_in_4bit=training_args.bits == 4,\n load_in_8bit=training_args.bits == 8,\n quantization_config=BitsAndBytesConfig(\n load_in_4bit=training_args.bits == 4,\n load_in_8bit=training_args.bits == 8,\n llm_int8_threshold=6.0,\n llm_int8_has_fp16_weight=False,\n bnb_4bit_compute_dtype=compute_dtype,\n bnb_4bit_use_double_quant=training_args.double_quant,\n bnb_4bit_quant_type=training_args.quant_type # {'fp4', 'nf4'}\n )\n ))\n\n if model_args.graph_tower is not None:\n self.model = GraphLlamaForCausalLM.from_pretrained(\n model_args.model_name_or_path,\n cache_dir=training_args.cache_dir,\n **bnb_model_from_pretrained_args\n ) ## TODO: add real Graph Llama model \n else:\n self.model = transformers.LlamaForCausalLM.from_pretrained(\n model_args.model_name_or_path,\n cache_dir=training_args.cache_dir,\n **bnb_model_from_pretrained_args\n )\n self.model.config.pretrain_graph_model_path = self.model.config.pretrain_graph_model_path + model_args.graph_tower\n self.model.config.use_cache = False\n if model_args.freeze_backbone:\n self.model.model.requires_grad_(False)\n\n if training_args.bits in [4, 8]:\n self.model.config.torch_dtype=(torch.float32 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32))\n self.model = prepare_model_for_int8_training(self.model, use_gradient_checkpointing=training_args.gradient_checkpointing)\n\n if training_args.gradient_checkpointing and model_args.graph_tower is None:\n if hasattr(self.model, \"enable_input_require_grads\"):\n self.model.enable_input_require_grads()\n else:\n def make_inputs_require_grad(module, input, output):\n output.requires_grad_(True)\n self.model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)\n\n if training_args.lora_enable:\n from peft import LoraConfig, get_peft_model\n lora_config = LoraConfig(\n r=training_args.lora_r,\n lora_alpha=training_args.lora_alpha,\n target_modules=find_all_linear_names(model),\n lora_dropout=training_args.lora_dropout,\n bias=training_args.lora_bias,\n task_type=\"CAUSAL_LM\",\n )\n if training_args.bits == 16:\n if training_args.bf16:\n model.to(torch.bfloat16)\n if training_args.fp16:\n model.to(torch.float16)\n logging.warning(\"Adding LoRA adapters...\")\n model = get_peft_model(model, lora_config)\n \n if model_args.graph_tower is not None:\n model_graph_dict = self.model.get_model().initialize_graph_modules(\n graph_tower=model_args.graph_tower,\n graph_select_layer=model_args.graph_select_layer,\n pretrain_graph_mlp_adapter=model_args.pretrain_graph_mlp_adapter,\n fsdp=None\n )\n self.model.get_graph_tower().to(dtype=compute_dtype)\n # graph_config = model_graph_dict['graph_config']\n\n # data_args.graph_token_len = model_graph_dict['graph_token_len']\n # data_args.graph_processor = model_graph_dict['graph_processor']\n data_args.is_graph = True\n\n self.model.config.tune_graph_mlp_adapter = training_args.tune_graph_mlp_adapter = model_args.tune_graph_mlp_adapter\n if model_args.tune_graph_mlp_adapter:\n self.model.requires_grad_(False)\n for p in self.model.get_model().graph_projector.parameters():\n p.requires_grad = True\n\n self.model.config.freeze_graph_mlp_adapter = training_args.freeze_graph_mlp_adapter\n if training_args.freeze_graph_mlp_adapter:\n for p in self.model.get_model().graph_projector.parameters():\n p.requires_grad = False\n\n if training_args.bits in [4, 8]:\n self.model.get_model().graph_projector.to(dtype=compute_dtype, device=training_args.device)\n\n self.model.config.use_graph_start_end = data_args.use_graph_start_end = model_args.use_graph_start_end\n # graph_config.use_graph_start_end = training_args.use_graph_start_end = model_args.use_graph_start_end\n training_args.use_graph_start_end = model_args.use_graph_start_end\n self.model.config.sep_graph_conv_front = data_args.sep_graph_conv_front\n self.model.initialize_graph_tokenizer(use_graph_start_end=model_args.use_graph_start_end, tokenizer=tokenizer, device='cuda',\n tune_graph_mlp_adapter=model_args.tune_graph_mlp_adapter, pretrain_graph_mlp_adapter=model_args.pretrain_graph_mlp_adapter)\n\n params_no_grad = [n for n, p in self.model.named_parameters() if not p.requires_grad]\n if training_args.bits in [4, 8]:\n from peft.tuners.lora import LoraLayer\n for name, module in self.model.named_modules():\n if isinstance(module, LoraLayer):\n if training_args.bf16:\n module = module.to(torch.bfloat16)\n if 'norm' in name:\n module = module.to(torch.float32)\n if 'lm_head' in name or 'embed_tokens' in name:\n if hasattr(module, 'weight'):\n if training_args.bf16 and module.weight.dtype == torch.float32:\n module = module.to(torch.bfloat16)\n\n print('************************** parameters: #', sum(p.numel() for p in self.model.parameters() if p.requires_grad))\n tuned_params = []\n for name, param in self.model.named_parameters():\n if param.requires_grad:\n tuned_params.append(name)\n print(tuned_params)\n \n def training_step(self, batch, batch_idx):\n bs = len(batch[\"input_ids\"])\n loss_dict = self.model(**batch)\n loss = loss_dict['loss']\n \n log_dict = {f'train_loss': loss.item()}\n self.log_dict(log_dict, on_step=True, on_epoch=True, prog_bar=True, sync_dist=True, batch_size=bs)\n return loss\n\n def configure_optimizers(self):\n \"\"\"Prepare optimizer and schedule (linear warmup and decay)\"\"\"\n # no_decay = [\"bias\", \"LayerNorm.weight\"]\n # if IS_STAGE2:\n \n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in self.model.named_parameters()], \"lr_scale\": [1e-5, 1e-4]\n }\n ]\n \n optimizer = AdamW(optimizer_grouped_parameters, lr=self.training_args.learning_rate)\n\n # scheduler = get_linear_schedule_with_warmup(\n # optimizer,\n # num_warmup_steps=self.training_args.warmup_steps,\n # num_training_steps=self.trainer.estimated_stepping_batches,\n # )\n scheduler = get_cosine_schedule_with_warmup(\n optimizer,\n num_warmup_steps=self.training_args.warmup_steps,\n num_training_steps=self.trainer.estimated_stepping_batches,\n )\n scheduler = {\"scheduler\": scheduler, \"interval\": \"step\", \"frequency\": 1}\n return [optimizer], [scheduler]"
}
] | import os
import copy
import json
import logging
import pathlib
import torch
import transformers
import torch.nn as nn
from dataclasses import dataclass, field
from typing import Dict, Optional, Sequence, List
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from graphgpt.train.graphchat_trainer import GraphChatTrainer
from graphgpt import conversation as conversation_lib
from graphgpt.model import *
from PIL import Image
from torch_geometric.data import Data
from lightning.pytorch.strategies import FSDPStrategy
from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
from transformers.models.llama.modeling_llama import LlamaDecoderLayer
from lightning.pytorch.loggers import WandbLogger
from lightning.pytorch import LightningModule, Trainer, seed_everything
from graphgpt.model.GraphLlama_pl import GraphGPT_pl
from lightning.pytorch.callbacks import ModelCheckpoint
from lightning.pytorch.callbacks.callback import Callback
from deepspeed import zero
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus | 5,665 | graph_type = copy.deepcopy(self.list_data_dict[i]['id']).split('_')[0]
graph_node_rep = self.graph_data_all[graph_type].x[graph_node_list] ##
cur_token_len = len(graph_node_rep) # FIXME: 14 is hardcoded patch size
sources = preprocess_graph(
copy.deepcopy([e["conversations"] for e in sources]),
self.graph_cfg, cur_token_len)
else:
sources = copy.deepcopy([e["conversations"] for e in sources])
data_dict = preprocess(
sources,
self.tokenizer)
if isinstance(i, int):
data_dict = dict(input_ids=data_dict["input_ids"][0],
labels=data_dict["labels"][0])
# image exist in the data
if 'graph' in self.list_data_dict[i]:
# data_dict['graph_node'] = graph_node_rep
# data_dict['graph_edge'] = graph_edge_index
# data_dict['target_node'] = target_node
data_dict['graph_data'] = Data(graph_node = graph_node_rep, edge_index=graph_edge_index, target_node = torch.tensor([target_node]))
elif self.graph_cfg['is_graph']:
# image does not exist in the data, but the model is multimodal
node_feas = self.graph_cfg['graph_processor'].node_feas
data_dict['graph_data'] = Data(graph_node = torch.zeros(3, node_feas), edge_index=torch.zeros(2, 3), target_node = torch.tensor([0]))
return data_dict
@dataclass
class DataCollatorForSupervisedDataset(object):
"""Collate examples for supervised fine-tuning."""
tokenizer: transformers.PreTrainedTokenizer
def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
input_ids, labels = tuple([instance[key] for instance in instances]
for key in ("input_ids", "labels"))
input_ids = torch.nn.utils.rnn.pad_sequence(
input_ids,
batch_first=True,
padding_value=self.tokenizer.pad_token_id)
labels = torch.nn.utils.rnn.pad_sequence(labels,
batch_first=True,
padding_value=IGNORE_INDEX)
batch = dict(
input_ids=input_ids,
labels=labels,
attention_mask=input_ids.ne(self.tokenizer.pad_token_id),
)
if 'graph_data' in instances[0]:
# graph_node_reps = [instance['graph_node'] for instance in instances]
# edge_index_reps = [instance['graph_edge'] for instance in instances]
# target_node_reps = [instance['target_node'] for instance in instances]
graph_data_batch = [instance['graph_data'] for instance in instances]
# if all(x is not None and x.shape == images[0].shape for x in images):
# batch['images'] = torch.stack(images)
# else:
# batch['images'] = images
# batch['graph_node_reps'] = graph_node_reps
# batch['edge_index_reps'] = edge_index_reps
# batch['edge_index_reps'] = target_node_reps
batch['graph_data'] = graph_data_batch
return batch
def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer,
data_args, training_args) -> Dict:
"""Make dataset and collator for supervised fine-tuning."""
dataset_cls = (LazySupervisedDataset
if data_args.lazy_preprocess else SupervisedDataset)
train_dataset = dataset_cls(tokenizer=tokenizer,
data_path=data_args.data_path,
graph_cfg=dict(
is_graph=data_args.is_graph,
sep_graph_conv_front=data_args.sep_graph_conv_front,
graph_token_len=data_args.graph_token_len,
graph_content=data_args.graph_content,
use_graph_start_end=getattr(data_args, 'use_graph_start_end', False)
),
graph_data_path = data_args.graph_data_path)
data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
train_dataloader = DataLoader(train_dataset,
batch_size=training_args.per_device_train_batch_size,
num_workers=training_args.num_workers,
collate_fn=data_collator,
prefetch_factor=4,
pin_memory=True)
return train_dataloader, None
def train():
parser = transformers.HfArgumentParser(
(ModelArguments, DataArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if isinstance(training_args.gpus, str):
training_args.gpus = [int(x) for x in training_args.gpus.split(',')]
batch_size = training_args.real_batch_size
devices = training_args.gpus
num_devices = len(devices)
gradient_accumulation_steps = max(1,batch_size // (training_args.per_device_train_batch_size*num_devices))
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
model_max_length=training_args.model_max_length,
padding_side="right",
use_fast=False
)
if model_args.version == "v1":
tokenizer.pad_token = tokenizer.unk_token
conversation_lib.default_conversation = conversation_lib.conv_templates["vicuna_v1_1"]
else:
raise ValueError
| # Adopted from https://github.com/lm-sys/FastChat. Below is the original copyright:
# Adopted from tatsu-lab@stanford_alpaca. Below is the original copyright:
# Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: import and use code from ../data/dataset.py
IGNORE_INDEX = -100
DEFAULT_PAD_TOKEN = "[PAD]"
DEFAULT_EOS_TOKEN = "</s>"
DEFAULT_BOS_TOKEN = "<s>"
DEFAULT_UNK_TOKEN = "<unk>"
DEFAULT_GRAPH_TOKEN = "<graph>"
DEFAULT_GRAPH_PATCH_TOKEN = "<g_patch>"
DEFAULT_G_START_TOKEN = "<g_start>"
DEFAULT_G_END_TOKEN = "<g_end>"
@dataclass
class ModelArguments:
model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
version: Optional[str] = field(default="v0")
freeze_backbone: bool = field(default=False)
tune_graph_mlp_adapter: bool = field(default=False)
graph_tower: Optional[str] = field(default=None)
graph_select_layer: Optional[int] = field(default=-1) # default to the last layer
pretrain_graph_mlp_adapter: Optional[str] = field(default=None)
use_graph_start_end: bool = field(default=False)
model_save_name: Optional[str] = field(default="model_{epoch}-{step}")
@dataclass
class DataArguments:
data_path: str = field(default=None,
metadata={"help": "Path to the training data."})
lazy_preprocess: bool = False
is_graph: bool = False
sep_graph_conv_front: bool = False
graph_token_len: int = 0
graph_content: Optional[str] = field(default=None)
graph_data_path: Optional[str] = field(default=None)
image_aspect_ratio: str = 'square'
@dataclass
class TrainingArguments:
cache_dir: Optional[str] = field(default=None)
optim: str = field(default="adamw_torch")
remove_unused_columns: bool = field(default=False)
freeze_graph_mlp_adapter: bool = field(default=False)
force_fsdp: bool = field(default=False)
model_max_length: int = field(
default=512,
metadata={
"help":
"Maximum sequence length. Sequences will be right padded (and possibly truncated)."
},
)
double_quant: bool = field(
default=True,
metadata={"help": "Compress the quantization statistics through double quantization."}
)
quant_type: str = field(
default="nf4",
metadata={"help": "Quantization data type to use. Should be one of `fp4` or `nf4`."}
)
bits: int = field(
default=16,
metadata={"help": "How many bits to use."}
)
strategy: str = field(
default='fsdp'
)
real_batch_size: int = field(default=1)
lora_enable: bool = False
lora_r: int = 64
lora_alpha: int = 16
lora_dropout: float = 0.05
lora_weight_path: str = ""
lora_bias: str = "none"
disable_tqdm: bool =False
gpus: Optional[str] = field(default='0,1')
resume: Optional[str] = field(default=None)
adam_epsilon: float = field(default=1e-8)
warmup_steps:int = field(default=1000)
num_workers:int = field(default=16)
bf16: bool = field(default=False)
fp16: bool = field(default=False)
output_dir: str = field(default='./checkpoints/graphchat-gt-graphmatch-7b')
num_train_epochs: int = field(default=3)
per_device_train_batch_size: int = field(default=1)
per_device_eval_batch_size: int = field(default=1)
gradient_accumulation_steps: int = field(default=1)
evaluation_strategy: str = field(default='no')
save_strategy: str = field(default='steps')
save_steps: int = field(default=2400)
save_total_limit: int = field(default=1)
learning_rate: float = field(default=2e-5)
weight_decay: float = field(default=0.)
warmup_ratio: float = field(default=0.03)
lr_scheduler_type: str = field(default='cosine')
logging_steps: int = field(default=1)
tf32: bool = field(default=True)
gradient_checkpointing: bool = field(default=True)
report_to: str = field(default='wandb')
class SaveGraphProjectorCallback(Callback):
def __init__(self, output_dir, keys_to_match):
self.output_dir = output_dir
self.keys_to_match = keys_to_match
def on_train_epoch_end(self, trainer, pl_module, unused=None):
# 准备保存模型权重
_state_dict = pl_module.state_dict()
weight_to_save = {}
for k, v in _state_dict.items():
if any(key_match in k for key_match in self.keys_to_match):
weight_to_save[k] = v
# 确保输出目录存在
os.makedirs(self.output_dir, exist_ok=True)
# 保存 graph projector 的权重
torch.save(weight_to_save, os.path.join(self.output_dir, 'graph_projector.bin'))
def maybe_zero_3(param, ignore_status=False, name=None):
if hasattr(param, "ds_id"):
if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
if not ignore_status:
logging.warning(f"{name}: param.ds_status != ZeroParamStatus.NOT_AVAILABLE: {param.ds_status}")
with zero.GatheredParameters([param]):
param = param.data.detach().cpu().clone()
else:
param = param.detach().cpu().clone()
return param
# Borrowed from peft.utils.get_peft_model_state_dict
def get_peft_state_maybe_zero_3(named_params, bias):
if bias == "none":
to_return = {k: t for k, t in named_params if "lora_" in k}
elif bias == "all":
to_return = {k: t for k, t in named_params if "lora_" in k or "bias" in k}
elif bias == "lora_only":
to_return = {}
maybe_lora_bias = {}
lora_bias_names = set()
for k, t in named_params:
if "lora_" in k:
to_return[k] = t
bias_name = k.split("lora_")[0] + "bias"
lora_bias_names.add(bias_name)
elif "bias" in k:
maybe_lora_bias[k] = t
for k, t in maybe_lora_bias:
if bias_name in lora_bias_names:
to_return[bias_name] = t
else:
raise NotImplementedError
to_return = {k: maybe_zero_3(v, name=k) for k, v in to_return.items()}
return to_return
def get_peft_state_non_lora_maybe_zero_3(named_params, require_grad_only=True):
to_return = {k: t for k, t in named_params if "lora_" not in k}
if require_grad_only:
to_return = {k: t for k, t in to_return.items() if t.requires_grad}
to_return = {k: maybe_zero_3(v, ignore_status=True).cpu() for k, v in to_return.items()}
return to_return
def find_all_linear_names(model):
cls = torch.nn.Linear
lora_module_names = set()
for name, module in model.named_modules():
if isinstance(module, cls):
names = name.split('.')
lora_module_names.add(names[0] if len(names) == 1 else names[-1])
if 'lm_head' in lora_module_names: # needed for 16-bit
lora_module_names.remove('lm_head')
return list(lora_module_names)
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer,
output_dir: str):
"""Collects the state dict and dump to disk."""
if trainer.deepspeed:
torch.cuda.synchronize()
trainer.save_model(output_dir)
return
state_dict = trainer.model.state_dict()
if trainer.args.should_save:
cpu_state_dict = {
key: value.cpu()
for key, value in state_dict.items()
}
del state_dict
trainer._save(output_dir, state_dict=cpu_state_dict) # noqa
def smart_tokenizer_and_embedding_resize(
special_tokens_dict: Dict,
tokenizer: transformers.PreTrainedTokenizer,
model: transformers.PreTrainedModel,
):
"""Resize tokenizer and embedding.
Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
"""
num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
model.resize_token_embeddings(len(tokenizer))
if num_new_tokens > 0:
input_embeddings = model.get_input_embeddings().weight.data
output_embeddings = model.get_output_embeddings().weight.data
input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
dim=0, keepdim=True)
output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
dim=0, keepdim=True)
input_embeddings[-num_new_tokens:] = input_embeddings_avg
output_embeddings[-num_new_tokens:] = output_embeddings_avg
def _tokenize_fn(strings: Sequence[str],
tokenizer: transformers.PreTrainedTokenizer) -> Dict:
"""Tokenize a list of strings."""
tokenized_list = [
tokenizer(
text,
return_tensors="pt",
padding="longest",
max_length=tokenizer.model_max_length,
truncation=True,
) for text in strings
]
input_ids = labels = [
tokenized.input_ids[0] for tokenized in tokenized_list
]
input_ids_lens = labels_lens = [
tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item()
for tokenized in tokenized_list
]
return dict(
input_ids=input_ids,
labels=labels,
input_ids_lens=input_ids_lens,
labels_lens=labels_lens,
)
def _mask_targets(target, tokenized_lens, speakers):
# cur_idx = 0
cur_idx = tokenized_lens[0]
tokenized_lens = tokenized_lens[1:]
target[:cur_idx] = IGNORE_INDEX
for tokenized_len, speaker in zip(tokenized_lens, speakers):
if speaker == "human":
target[cur_idx+2:cur_idx + tokenized_len] = IGNORE_INDEX
cur_idx += tokenized_len
def _add_speaker_and_signal(header, source, get_conversation=True):
"""Add speaker and start/end signal on each round."""
BEGIN_SIGNAL = "### "
END_SIGNAL = "\n"
conversation = header
for sentence in source:
from_str = sentence["from"]
if from_str.lower() == "human":
from_str = conversation_lib.default_conversation.roles[0]
elif from_str.lower() == "gpt":
from_str = conversation_lib.default_conversation.roles[1]
else:
from_str = 'unknown'
sentence["value"] = (BEGIN_SIGNAL + from_str + ": " +
sentence["value"] + END_SIGNAL)
if get_conversation:
conversation += sentence["value"]
conversation += BEGIN_SIGNAL
return conversation
def preprocess_graph(
sources: Sequence[str],
graph_cfg: dict,
cur_token_len: int,
) -> Dict:
is_graph = graph_cfg['is_graph']
# image_token_len = multimodal_cfg['image_token_len']
graph_token_len = cur_token_len
if not is_graph:
return sources
for source in sources:
if graph_cfg['sep_graph_conv_front']:
assert DEFAULT_GRAPH_TOKEN in source[0]['value']
source[0]['value'] = source[0]['value'].replace(DEFAULT_GRAPH_TOKEN, '').strip()
source[0]['value'] = DEFAULT_GRAPH_TOKEN + conversation_lib.default_conversation.sep + conversation_lib.default_conversation.roles[0] + ": " + source[0]['value']
for sentence in source:
replace_token = DEFAULT_GRAPH_PATCH_TOKEN * graph_token_len
if graph_cfg['use_graph_start_end']:
replace_token = DEFAULT_G_START_TOKEN + replace_token + DEFAULT_G_END_TOKEN
sentence["value"] = sentence["value"].replace(DEFAULT_GRAPH_TOKEN, replace_token)
return sources
def preprocess_graph_LP(
sources: Sequence[str],
graph_cfg: dict,
cur_token_len_1: int,
cur_token_len_2: int,
) -> Dict:
is_graph = graph_cfg['is_graph']
# image_token_len = multimodal_cfg['image_token_len']
graph_token_len_1 = cur_token_len_1
graph_token_len_2 = cur_token_len_2
if not is_graph:
return sources
for source in sources:
if graph_cfg['sep_graph_conv_front']:
assert DEFAULT_GRAPH_TOKEN in source[0]['value']
source[0]['value'] = source[0]['value'].replace(DEFAULT_GRAPH_TOKEN, '').strip()
source[0]['value'] = DEFAULT_GRAPH_TOKEN + conversation_lib.default_conversation.sep + conversation_lib.default_conversation.roles[0] + ": " + source[0]['value']
for sentence in source:
replace_token_1 = DEFAULT_GRAPH_PATCH_TOKEN * graph_token_len_1
replace_token_2 = DEFAULT_GRAPH_PATCH_TOKEN * graph_token_len_2
if graph_cfg['use_graph_start_end']:
replace_token_1 = DEFAULT_G_START_TOKEN + replace_token_1 + DEFAULT_G_END_TOKEN
replace_token_2 = DEFAULT_G_START_TOKEN + replace_token_2 + DEFAULT_G_END_TOKEN
if DEFAULT_GRAPH_TOKEN in sentence["value"]:
first_index = sentence["value"].find(DEFAULT_GRAPH_TOKEN)
sentence["value"] = sentence["value"][:first_index] + replace_token_1 + sentence["value"][first_index+len(DEFAULT_GRAPH_TOKEN):]
# 替换第二个<graph>为B
second_index = sentence["value"].find(DEFAULT_GRAPH_TOKEN)
sentence["value"] = sentence["value"][:second_index] + replace_token_2 + sentence["value"][second_index+len(DEFAULT_GRAPH_TOKEN):]
# sentence["value"] = sentence["value"].replace(DEFAULT_GRAPH_TOKEN, replace_token)
# print(sources)
return sources
def preprocess_v1(
sources,
tokenizer: transformers.PreTrainedTokenizer,
) -> Dict:
conv = conversation_lib.default_conversation.copy()
roles = {"human": conv.roles[0], "gpt": conv.roles[1]}
# Apply prompt templates
conversations = []
for i, source in enumerate(sources):
if roles[source[0]["from"]] != conv.roles[0]:
# Skip the first one if it is not from human
source = source[1:]
conv.messages = []
for j, sentence in enumerate(source):
role = roles[sentence["from"]]
assert role == conv.roles[j % 2], f"{i}"
conv.append_message(role, sentence["value"])
conversations.append(conv.get_prompt())
# Tokenize conversations
input_ids = tokenizer(
conversations,
return_tensors="pt",
padding="longest",
max_length=tokenizer.model_max_length,
truncation=True,
).input_ids
targets = input_ids.clone()
assert conv.sep_style == conversation_lib.SeparatorStyle.TWO
# Mask targets
sep = conv.sep + conv.roles[1] + ": "
for conversation, target in zip(conversations, targets):
total_len = int(target.ne(tokenizer.pad_token_id).sum())
rounds = conversation.split(conv.sep2)
cur_len = 1
target[:cur_len] = IGNORE_INDEX
for i, rou in enumerate(rounds):
if rou == "":
break
parts = rou.split(sep)
if len(parts) != 2:
break
parts[0] += sep
round_len = len(tokenizer(rou).input_ids)
instruction_len = len(tokenizer(parts[0]).input_ids) - 2
target[cur_len : cur_len + instruction_len] = IGNORE_INDEX
cur_len += round_len
target[cur_len:] = IGNORE_INDEX
if cur_len < tokenizer.model_max_length:
if cur_len != total_len:
target[:] = IGNORE_INDEX
print(
f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}."
f" (ignored)"
)
return dict(
input_ids=input_ids,
labels=targets,
)
def preprocess_mpt(
sources,
tokenizer: transformers.PreTrainedTokenizer,
) -> Dict:
conv = conversation_lib.default_conversation.copy()
roles = {"human": conv.roles[0], "gpt": conv.roles[1]}
# Apply prompt templates
conversations = []
for i, source in enumerate(sources):
if roles[source[0]["from"]] != conv.roles[0]:
# Skip the first one if it is not from human
source = source[1:]
conv.messages = []
for j, sentence in enumerate(source):
role = roles[sentence["from"]]
assert role == conv.roles[j % 2], f"{i}"
conv.append_message(role, sentence["value"])
conversations.append(conv.get_prompt())
# Tokenize conversations
input_ids = tokenizer(
conversations,
return_tensors="pt",
padding="longest",
max_length=tokenizer.model_max_length,
truncation=True,
).input_ids
targets = input_ids.clone()
assert conv.sep_style == conversation_lib.SeparatorStyle.MPT
# Mask targets
sep = conv.sep + conv.roles[1]
for conversation, target in zip(conversations, targets):
total_len = int(target.ne(tokenizer.pad_token_id).sum())
rounds = conversation.split(conv.sep)
re_rounds = [conv.sep.join(rounds[:3])] # system + user + gpt
for conv_idx in range(3, len(rounds), 2):
re_rounds.append(conv.sep.join(rounds[conv_idx:conv_idx+2])) # user + gpt
cur_len = 0
target[:cur_len] = IGNORE_INDEX
for i, rou in enumerate(re_rounds):
if rou == "":
break
parts = rou.split(sep)
if len(parts) != 2:
break
parts[0] += sep
round_len = len(tokenizer(rou).input_ids) + len(tokenizer(conv.sep).input_ids)
instruction_len = len(tokenizer(parts[0]).input_ids)
target[cur_len : cur_len + instruction_len] = IGNORE_INDEX
cur_len += round_len
target[cur_len:] = IGNORE_INDEX
if cur_len < tokenizer.model_max_length:
if cur_len != total_len:
target[:] = IGNORE_INDEX
print(
f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}."
f" (ignored)"
)
return dict(
input_ids=input_ids,
labels=targets,
)
def preprocess(
sources: Sequence[str],
tokenizer: transformers.PreTrainedTokenizer,
) -> Dict:
"""
Given a list of sources, each is a conversation list. This transform:
1. Add signal '### ' at the beginning each sentence, with end signal '\n';
2. Concatenate conversations together;
3. Tokenize the concatenated conversation;
4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX.
"""
if conversation_lib.default_conversation.version == "v1":
return preprocess_v1(sources, tokenizer)
if conversation_lib.default_conversation.version == "mpt":
return preprocess_mpt(sources, tokenizer)
# add end signal and concatenate together
conversations = []
for source in sources:
header = f"{conversation_lib.default_conversation.system}\n\n"
conversation = _add_speaker_and_signal(header, source)
conversations.append(conversation)
# tokenize conversations
conversations_tokenized = _tokenize_fn(conversations, tokenizer)
input_ids = conversations_tokenized["input_ids"]
targets = copy.deepcopy(input_ids)
for target, source in zip(targets, sources):
tokenized_lens = _tokenize_fn([header] + [s["value"] for s in source],
tokenizer)["input_ids_lens"]
speakers = [sentence["from"] for sentence in source]
_mask_targets(target, tokenized_lens, speakers)
return dict(input_ids=input_ids, labels=targets)
class SupervisedDataset(Dataset):
"""Dataset for supervised fine-tuning."""
def __init__(self, data_path: str,
tokenizer: transformers.PreTrainedTokenizer):
super(SupervisedDataset, self).__init__()
logging.warning("Loading data...")
list_data_dict = json.load(open(data_path, "r"))
logging.warning("Formatting inputs...")
sources = [example["conversations"] for example in list_data_dict]
data_dict = preprocess(sources, tokenizer)
self.input_ids = data_dict["input_ids"]
self.labels = data_dict["labels"]
def __len__(self):
return len(self.input_ids)
def __getitem__(self, i) -> Dict[str, torch.Tensor]:
return dict(input_ids=self.input_ids[i], labels=self.labels[i])
class LazySupervisedDataset(Dataset):
"""Dataset for supervised fine-tuning."""
def __init__(self, data_path: str,
tokenizer: transformers.PreTrainedTokenizer,
graph_cfg: dict,
**kwargs,):
super(LazySupervisedDataset, self).__init__()
logging.warning("Loading data...")
list_data_dict = json.load(open(data_path, "r"))
logging.warning("Formatting inputs...Skip in lazy mode")
self.tokenizer = tokenizer
self.list_data_dict = list_data_dict
self.graph_cfg = graph_cfg
graph_data_path = kwargs.get('graph_data_path')
self.graph_data_all = torch.load(graph_data_path)
def __len__(self):
return len(self.list_data_dict)
def __getitem__(self, i) -> Dict[str, torch.Tensor]:
sources = self.list_data_dict[i]
if isinstance(i, int):
sources = [sources]
assert len(sources) == 1, "Don't know why it is wrapped to a list" # FIXME
task_type = self.list_data_dict[i]['id'].split("_")[-1]
if task_type != 'LP':
if 'graph' in sources[0]:
graph_dict = self.list_data_dict[i]['graph']
graph_edge_index = torch.Tensor(copy.deepcopy(graph_dict['edge_index'])).long()
graph_node_list = copy.deepcopy(graph_dict['node_list'])
target_node = copy.deepcopy(graph_dict['node_idx'])
graph_type = copy.deepcopy(self.list_data_dict[i]['id']).split('_')[0]
graph_node_rep = self.graph_data_all[graph_type].x[graph_node_list] ##
cur_token_len = len(graph_node_rep) # FIXME: 14 is hardcoded patch size
sources = preprocess_graph(
copy.deepcopy([e["conversations"] for e in sources]),
self.graph_cfg, cur_token_len)
else:
sources = copy.deepcopy([e["conversations"] for e in sources])
else:
if 'graph' in sources[0]:
graph_dict = self.list_data_dict[i]['graph']
graph_edge_index_1 = torch.Tensor(copy.deepcopy(graph_dict['edge_index_1'])).long()
graph_node_list_1 = copy.deepcopy(graph_dict['node_list_1'])
target_node_1 = copy.deepcopy(graph_dict['node_idx_1'])
graph_type = copy.deepcopy(self.list_data_dict[i]['id']).split('_')[0]
graph_node_rep_1 = self.graph_data_all[graph_type].x[graph_node_list_1] ##
cur_token_len_1 = len(graph_node_rep_1) # FIXME: 14 is hardcoded patch size
graph_edge_index_2 = torch.Tensor(copy.deepcopy(graph_dict['edge_index_2'])).long()
graph_node_list_2 = copy.deepcopy(graph_dict['node_list_2'])
target_node_2 = copy.deepcopy(graph_dict['node_idx_2'])
graph_node_rep_2 = self.graph_data_all[graph_type].x[graph_node_list_2] ##
cur_token_len_2 = len(graph_node_rep_2) # FIXME: 14 is hardcoded patch size
sources = preprocess_graph_LP(
copy.deepcopy([e["conversations"] for e in sources]),
self.graph_cfg, cur_token_len_1, cur_token_len_2)
else:
sources = copy.deepcopy([e["conversations"] for e in sources])
data_dict = preprocess(
sources,
self.tokenizer)
if isinstance(i, int):
data_dict = dict(input_ids=data_dict["input_ids"][0],
labels=data_dict["labels"][0])
# image exist in the data
if task_type != 'LP':
if 'graph' in self.list_data_dict[i]:
# data_dict['graph_node'] = graph_node_rep
# data_dict['graph_edge'] = graph_edge_index
# data_dict['target_node'] = target_node
data_dict['graph_data'] = Data(graph_node = graph_node_rep, edge_index=graph_edge_index, target_node = torch.tensor([target_node]))
elif self.graph_cfg['is_graph']:
# image does not exist in the data, but the model is multimodal
node_feas = self.graph_cfg['graph_processor'].node_feas
data_dict['graph_data'] = Data(graph_node = torch.zeros(3, node_feas), edge_index=torch.zeros(2, 3), target_node = torch.tensor([0]))
else:
if 'graph' in self.list_data_dict[i]:
# data_dict['graph_node'] = graph_node_rep
# data_dict['graph_edge'] = graph_edge_index
# data_dict['target_node'] = target_node
data_dict['graph_data'] = {
'graph_1': Data(graph_node = graph_node_rep_1, edge_index=graph_edge_index_1, target_node = torch.tensor([target_node_1])),
'graph_2': Data(graph_node = graph_node_rep_2, edge_index=graph_edge_index_2, target_node = torch.tensor([target_node_2]))
}
elif self.graph_cfg['is_graph']:
# image does not exist in the data, but the model is multimodal
node_feas = self.graph_cfg['graph_processor'].node_feas
data_dict['graph_data'] = Data(graph_node = torch.zeros(3, node_feas), edge_index=torch.zeros(2, 3), target_node = torch.tensor([0]))
return data_dict
class LazySupervisedDataset_back(Dataset):
"""Dataset for supervised fine-tuning."""
def __init__(self, data_path: str,
tokenizer: transformers.PreTrainedTokenizer,
graph_cfg: dict,
**kwargs,):
super(LazySupervisedDataset, self).__init__()
logging.warning("Loading data...")
list_data_dict = json.load(open(data_path, "r"))
logging.warning("Formatting inputs...Skip in lazy mode")
self.tokenizer = tokenizer
self.list_data_dict = list_data_dict
self.graph_cfg = graph_cfg
graph_data_path = kwargs.get('graph_data_path')
self.graph_data_all = torch.load(graph_data_path)
def __len__(self):
return len(self.list_data_dict)
def __getitem__(self, i) -> Dict[str, torch.Tensor]:
sources = self.list_data_dict[i]
if isinstance(i, int):
sources = [sources]
assert len(sources) == 1, "Don't know why it is wrapped to a list" # FIXME
if 'graph' in sources[0]:
graph_dict = self.list_data_dict[i]['graph']
graph_edge_index = torch.Tensor(copy.deepcopy(graph_dict['edge_index'])).long()
graph_node_list = copy.deepcopy(graph_dict['node_list'])
target_node = copy.deepcopy(graph_dict['node_idx'])
graph_type = copy.deepcopy(self.list_data_dict[i]['id']).split('_')[0]
graph_node_rep = self.graph_data_all[graph_type].x[graph_node_list] ##
cur_token_len = len(graph_node_rep) # FIXME: 14 is hardcoded patch size
sources = preprocess_graph(
copy.deepcopy([e["conversations"] for e in sources]),
self.graph_cfg, cur_token_len)
else:
sources = copy.deepcopy([e["conversations"] for e in sources])
data_dict = preprocess(
sources,
self.tokenizer)
if isinstance(i, int):
data_dict = dict(input_ids=data_dict["input_ids"][0],
labels=data_dict["labels"][0])
# image exist in the data
if 'graph' in self.list_data_dict[i]:
# data_dict['graph_node'] = graph_node_rep
# data_dict['graph_edge'] = graph_edge_index
# data_dict['target_node'] = target_node
data_dict['graph_data'] = Data(graph_node = graph_node_rep, edge_index=graph_edge_index, target_node = torch.tensor([target_node]))
elif self.graph_cfg['is_graph']:
# image does not exist in the data, but the model is multimodal
node_feas = self.graph_cfg['graph_processor'].node_feas
data_dict['graph_data'] = Data(graph_node = torch.zeros(3, node_feas), edge_index=torch.zeros(2, 3), target_node = torch.tensor([0]))
return data_dict
@dataclass
class DataCollatorForSupervisedDataset(object):
"""Collate examples for supervised fine-tuning."""
tokenizer: transformers.PreTrainedTokenizer
def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
input_ids, labels = tuple([instance[key] for instance in instances]
for key in ("input_ids", "labels"))
input_ids = torch.nn.utils.rnn.pad_sequence(
input_ids,
batch_first=True,
padding_value=self.tokenizer.pad_token_id)
labels = torch.nn.utils.rnn.pad_sequence(labels,
batch_first=True,
padding_value=IGNORE_INDEX)
batch = dict(
input_ids=input_ids,
labels=labels,
attention_mask=input_ids.ne(self.tokenizer.pad_token_id),
)
if 'graph_data' in instances[0]:
# graph_node_reps = [instance['graph_node'] for instance in instances]
# edge_index_reps = [instance['graph_edge'] for instance in instances]
# target_node_reps = [instance['target_node'] for instance in instances]
graph_data_batch = [instance['graph_data'] for instance in instances]
# if all(x is not None and x.shape == images[0].shape for x in images):
# batch['images'] = torch.stack(images)
# else:
# batch['images'] = images
# batch['graph_node_reps'] = graph_node_reps
# batch['edge_index_reps'] = edge_index_reps
# batch['edge_index_reps'] = target_node_reps
batch['graph_data'] = graph_data_batch
return batch
def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer,
data_args, training_args) -> Dict:
"""Make dataset and collator for supervised fine-tuning."""
dataset_cls = (LazySupervisedDataset
if data_args.lazy_preprocess else SupervisedDataset)
train_dataset = dataset_cls(tokenizer=tokenizer,
data_path=data_args.data_path,
graph_cfg=dict(
is_graph=data_args.is_graph,
sep_graph_conv_front=data_args.sep_graph_conv_front,
graph_token_len=data_args.graph_token_len,
graph_content=data_args.graph_content,
use_graph_start_end=getattr(data_args, 'use_graph_start_end', False)
),
graph_data_path = data_args.graph_data_path)
data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
train_dataloader = DataLoader(train_dataset,
batch_size=training_args.per_device_train_batch_size,
num_workers=training_args.num_workers,
collate_fn=data_collator,
prefetch_factor=4,
pin_memory=True)
return train_dataloader, None
def train():
parser = transformers.HfArgumentParser(
(ModelArguments, DataArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if isinstance(training_args.gpus, str):
training_args.gpus = [int(x) for x in training_args.gpus.split(',')]
batch_size = training_args.real_batch_size
devices = training_args.gpus
num_devices = len(devices)
gradient_accumulation_steps = max(1,batch_size // (training_args.per_device_train_batch_size*num_devices))
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
model_max_length=training_args.model_max_length,
padding_side="right",
use_fast=False
)
if model_args.version == "v1":
tokenizer.pad_token = tokenizer.unk_token
conversation_lib.default_conversation = conversation_lib.conv_templates["vicuna_v1_1"]
else:
raise ValueError
| model = GraphGPT_pl(training_args, model_args, data_args, tokenizer) | 2 | 2023-10-15 05:13:24+00:00 | 8k |
hkchengrex/Cutie | gui/ritm/model/is_hrnet_model.py | [
{
"identifier": "serialize",
"path": "gui/ritm/utils/serialization.py",
"snippet": "def serialize(init):\n parameters = list(inspect.signature(init).parameters)\n\n @wraps(init)\n def new_init(self, *args, **kwargs):\n params = deepcopy(kwargs)\n for pname, value in zip(parameters[1:], args):\n params[pname] = value\n\n config = {'class': get_classname(self.__class__), 'params': dict()}\n specified_params = set(params.keys())\n\n for pname, param in get_default_params(self.__class__).items():\n if pname not in params:\n params[pname] = param.default\n\n for name, value in list(params.items()):\n param_type = 'builtin'\n if inspect.isclass(value):\n param_type = 'class'\n value = get_classname(value)\n\n config['params'][name] = {\n 'type': param_type,\n 'value': value,\n 'specified': name in specified_params\n }\n\n setattr(self, '_config', config)\n init(self, *args, **kwargs)\n\n return new_init"
},
{
"identifier": "ISModel",
"path": "gui/ritm/model/is_model.py",
"snippet": "class ISModel(nn.Module):\n def __init__(self,\n use_rgb_conv=True,\n with_aux_output=False,\n norm_radius=260,\n use_disks=False,\n cpu_dist_maps=False,\n clicks_groups=None,\n with_prev_mask=False,\n use_leaky_relu=False,\n binary_prev_mask=False,\n conv_extend=False,\n norm_layer=nn.BatchNorm2d,\n norm_mean_std=([.485, .456, .406], [.229, .224, .225])):\n super().__init__()\n self.with_aux_output = with_aux_output\n self.clicks_groups = clicks_groups\n self.with_prev_mask = with_prev_mask\n self.binary_prev_mask = binary_prev_mask\n self.normalization = BatchImageNormalize(norm_mean_std[0], norm_mean_std[1])\n\n self.coord_feature_ch = 2\n if clicks_groups is not None:\n self.coord_feature_ch *= len(clicks_groups)\n\n if self.with_prev_mask:\n self.coord_feature_ch += 1\n\n if use_rgb_conv:\n rgb_conv_layers = [\n nn.Conv2d(in_channels=3 + self.coord_feature_ch,\n out_channels=6 + self.coord_feature_ch,\n kernel_size=1),\n norm_layer(6 + self.coord_feature_ch),\n nn.LeakyReLU(negative_slope=0.2) if use_leaky_relu else nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=6 + self.coord_feature_ch, out_channels=3, kernel_size=1)\n ]\n self.rgb_conv = nn.Sequential(*rgb_conv_layers)\n elif conv_extend:\n self.rgb_conv = None\n self.maps_transform = nn.Conv2d(in_channels=self.coord_feature_ch,\n out_channels=64,\n kernel_size=3,\n stride=2,\n padding=1)\n self.maps_transform.apply(LRMult(0.1))\n else:\n self.rgb_conv = None\n mt_layers = [\n nn.Conv2d(in_channels=self.coord_feature_ch, out_channels=16, kernel_size=1),\n nn.LeakyReLU(negative_slope=0.2) if use_leaky_relu else nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=16, out_channels=64, kernel_size=3, stride=2, padding=1),\n ScaleLayer(init_value=0.05, lr_mult=1)\n ]\n self.maps_transform = nn.Sequential(*mt_layers)\n\n if self.clicks_groups is not None:\n self.dist_maps = nn.ModuleList()\n for click_radius in self.clicks_groups:\n self.dist_maps.append(\n DistMaps(norm_radius=click_radius,\n spatial_scale=1.0,\n cpu_mode=cpu_dist_maps,\n use_disks=use_disks))\n else:\n self.dist_maps = DistMaps(norm_radius=norm_radius,\n spatial_scale=1.0,\n cpu_mode=cpu_dist_maps,\n use_disks=use_disks)\n\n def forward(self, image, points):\n image, prev_mask = self.prepare_input(image)\n coord_features = self.get_coord_features(image, prev_mask, points)\n\n if self.rgb_conv is not None:\n x = self.rgb_conv(torch.cat((image, coord_features), dim=1))\n outputs = self.backbone_forward(x)\n else:\n coord_features = self.maps_transform(coord_features)\n outputs = self.backbone_forward(image, coord_features)\n\n outputs['instances'] = nn.functional.interpolate(outputs['instances'],\n size=image.size()[2:],\n mode='bilinear',\n align_corners=True)\n if self.with_aux_output:\n outputs['instances_aux'] = nn.functional.interpolate(outputs['instances_aux'],\n size=image.size()[2:],\n mode='bilinear',\n align_corners=True)\n\n return outputs\n\n def prepare_input(self, image):\n prev_mask = None\n if self.with_prev_mask:\n prev_mask = image[:, 3:, :, :]\n image = image[:, :3, :, :]\n if self.binary_prev_mask:\n prev_mask = (prev_mask > 0.5).float()\n\n image = self.normalization(image)\n return image, prev_mask\n\n def backbone_forward(self, image, coord_features=None):\n raise NotImplementedError\n\n def get_coord_features(self, image, prev_mask, points):\n if self.clicks_groups is not None:\n points_groups = split_points_by_order(points,\n groups=(2, ) + (1, ) *\n (len(self.clicks_groups) - 2) + (-1, ))\n coord_features = [\n dist_map(image, pg) for dist_map, pg in zip(self.dist_maps, points_groups)\n ]\n coord_features = torch.cat(coord_features, dim=1)\n else:\n coord_features = self.dist_maps(image, points)\n\n if prev_mask is not None:\n coord_features = torch.cat((prev_mask, coord_features), dim=1)\n\n return coord_features"
},
{
"identifier": "HighResolutionNet",
"path": "gui/ritm/model/modeling/hrnet_ocr.py",
"snippet": "class HighResolutionNet(nn.Module):\n def __init__(self, width, num_classes, ocr_width=256, small=False,\n norm_layer=nn.BatchNorm2d, align_corners=True):\n super(HighResolutionNet, self).__init__()\n self.norm_layer = norm_layer\n self.width = width\n self.ocr_width = ocr_width\n self.align_corners = align_corners\n\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False)\n self.bn1 = norm_layer(64)\n self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False)\n self.bn2 = norm_layer(64)\n self.relu = nn.ReLU(inplace=relu_inplace)\n\n num_blocks = 2 if small else 4\n\n stage1_num_channels = 64\n self.layer1 = self._make_layer(BottleneckV1b, 64, stage1_num_channels, blocks=num_blocks)\n stage1_out_channel = BottleneckV1b.expansion * stage1_num_channels\n\n self.stage2_num_branches = 2\n num_channels = [width, 2 * width]\n num_inchannels = [\n num_channels[i] * BasicBlockV1b.expansion for i in range(len(num_channels))]\n self.transition1 = self._make_transition_layer(\n [stage1_out_channel], num_inchannels)\n self.stage2, pre_stage_channels = self._make_stage(\n BasicBlockV1b, num_inchannels=num_inchannels, num_modules=1, num_branches=self.stage2_num_branches,\n num_blocks=2 * [num_blocks], num_channels=num_channels)\n\n self.stage3_num_branches = 3\n num_channels = [width, 2 * width, 4 * width]\n num_inchannels = [\n num_channels[i] * BasicBlockV1b.expansion for i in range(len(num_channels))]\n self.transition2 = self._make_transition_layer(\n pre_stage_channels, num_inchannels)\n self.stage3, pre_stage_channels = self._make_stage(\n BasicBlockV1b, num_inchannels=num_inchannels,\n num_modules=3 if small else 4, num_branches=self.stage3_num_branches,\n num_blocks=3 * [num_blocks], num_channels=num_channels)\n\n self.stage4_num_branches = 4\n num_channels = [width, 2 * width, 4 * width, 8 * width]\n num_inchannels = [\n num_channels[i] * BasicBlockV1b.expansion for i in range(len(num_channels))]\n self.transition3 = self._make_transition_layer(\n pre_stage_channels, num_inchannels)\n self.stage4, pre_stage_channels = self._make_stage(\n BasicBlockV1b, num_inchannels=num_inchannels, num_modules=2 if small else 3,\n num_branches=self.stage4_num_branches,\n num_blocks=4 * [num_blocks], num_channels=num_channels)\n\n last_inp_channels = np.int32(np.sum(pre_stage_channels))\n if self.ocr_width > 0:\n ocr_mid_channels = 2 * self.ocr_width\n ocr_key_channels = self.ocr_width\n\n self.conv3x3_ocr = nn.Sequential(\n nn.Conv2d(last_inp_channels, ocr_mid_channels,\n kernel_size=3, stride=1, padding=1),\n norm_layer(ocr_mid_channels),\n nn.ReLU(inplace=relu_inplace),\n )\n self.ocr_gather_head = SpatialGather_Module(num_classes)\n\n self.ocr_distri_head = SpatialOCR_Module(in_channels=ocr_mid_channels,\n key_channels=ocr_key_channels,\n out_channels=ocr_mid_channels,\n scale=1,\n dropout=0.05,\n norm_layer=norm_layer,\n align_corners=align_corners)\n self.cls_head = nn.Conv2d(\n ocr_mid_channels, num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n\n self.aux_head = nn.Sequential(\n nn.Conv2d(last_inp_channels, last_inp_channels,\n kernel_size=1, stride=1, padding=0),\n norm_layer(last_inp_channels),\n nn.ReLU(inplace=relu_inplace),\n nn.Conv2d(last_inp_channels, num_classes,\n kernel_size=1, stride=1, padding=0, bias=True)\n )\n else:\n self.cls_head = nn.Sequential(\n nn.Conv2d(last_inp_channels, last_inp_channels,\n kernel_size=3, stride=1, padding=1),\n norm_layer(last_inp_channels),\n nn.ReLU(inplace=relu_inplace),\n nn.Conv2d(last_inp_channels, num_classes,\n kernel_size=1, stride=1, padding=0, bias=True)\n )\n\n def _make_transition_layer(\n self, num_channels_pre_layer, num_channels_cur_layer):\n num_branches_cur = len(num_channels_cur_layer)\n num_branches_pre = len(num_channels_pre_layer)\n\n transition_layers = []\n for i in range(num_branches_cur):\n if i < num_branches_pre:\n if num_channels_cur_layer[i] != num_channels_pre_layer[i]:\n transition_layers.append(nn.Sequential(\n nn.Conv2d(num_channels_pre_layer[i],\n num_channels_cur_layer[i],\n kernel_size=3,\n stride=1,\n padding=1,\n bias=False),\n self.norm_layer(num_channels_cur_layer[i]),\n nn.ReLU(inplace=relu_inplace)))\n else:\n transition_layers.append(None)\n else:\n conv3x3s = []\n for j in range(i + 1 - num_branches_pre):\n inchannels = num_channels_pre_layer[-1]\n outchannels = num_channels_cur_layer[i] \\\n if j == i - num_branches_pre else inchannels\n conv3x3s.append(nn.Sequential(\n nn.Conv2d(inchannels, outchannels,\n kernel_size=3, stride=2, padding=1, bias=False),\n self.norm_layer(outchannels),\n nn.ReLU(inplace=relu_inplace)))\n transition_layers.append(nn.Sequential(*conv3x3s))\n\n return nn.ModuleList(transition_layers)\n\n def _make_layer(self, block, inplanes, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n self.norm_layer(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(inplanes, planes, stride,\n downsample=downsample, norm_layer=self.norm_layer))\n inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(inplanes, planes, norm_layer=self.norm_layer))\n\n return nn.Sequential(*layers)\n\n def _make_stage(self, block, num_inchannels,\n num_modules, num_branches, num_blocks, num_channels,\n fuse_method='SUM',\n multi_scale_output=True):\n modules = []\n for i in range(num_modules):\n # multi_scale_output is only used last module\n if not multi_scale_output and i == num_modules - 1:\n reset_multi_scale_output = False\n else:\n reset_multi_scale_output = True\n modules.append(\n HighResolutionModule(num_branches,\n block,\n num_blocks,\n num_inchannels,\n num_channels,\n fuse_method,\n reset_multi_scale_output,\n norm_layer=self.norm_layer,\n align_corners=self.align_corners)\n )\n num_inchannels = modules[-1].get_num_inchannels()\n\n return nn.Sequential(*modules), num_inchannels\n\n def forward(self, x, additional_features=None):\n feats = self.compute_hrnet_feats(x, additional_features)\n if self.ocr_width > 0:\n out_aux = self.aux_head(feats)\n feats = self.conv3x3_ocr(feats)\n\n context = self.ocr_gather_head(feats, out_aux)\n feats = self.ocr_distri_head(feats, context)\n out = self.cls_head(feats)\n return [out, out_aux]\n else:\n return [self.cls_head(feats), None]\n\n def compute_hrnet_feats(self, x, additional_features):\n x = self.compute_pre_stage_features(x, additional_features)\n x = self.layer1(x)\n\n x_list = []\n for i in range(self.stage2_num_branches):\n if self.transition1[i] is not None:\n x_list.append(self.transition1[i](x))\n else:\n x_list.append(x)\n y_list = self.stage2(x_list)\n\n x_list = []\n for i in range(self.stage3_num_branches):\n if self.transition2[i] is not None:\n if i < self.stage2_num_branches:\n x_list.append(self.transition2[i](y_list[i]))\n else:\n x_list.append(self.transition2[i](y_list[-1]))\n else:\n x_list.append(y_list[i])\n y_list = self.stage3(x_list)\n\n x_list = []\n for i in range(self.stage4_num_branches):\n if self.transition3[i] is not None:\n if i < self.stage3_num_branches:\n x_list.append(self.transition3[i](y_list[i]))\n else:\n x_list.append(self.transition3[i](y_list[-1]))\n else:\n x_list.append(y_list[i])\n x = self.stage4(x_list)\n\n return self.aggregate_hrnet_features(x)\n\n def compute_pre_stage_features(self, x, additional_features):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n if additional_features is not None:\n x = x + additional_features\n x = self.conv2(x)\n x = self.bn2(x)\n return self.relu(x)\n\n def aggregate_hrnet_features(self, x):\n # Upsampling\n x0_h, x0_w = x[0].size(2), x[0].size(3)\n x1 = F.interpolate(x[1], size=(x0_h, x0_w),\n mode='bilinear', align_corners=self.align_corners)\n x2 = F.interpolate(x[2], size=(x0_h, x0_w),\n mode='bilinear', align_corners=self.align_corners)\n x3 = F.interpolate(x[3], size=(x0_h, x0_w),\n mode='bilinear', align_corners=self.align_corners)\n\n return torch.cat([x[0], x1, x2, x3], 1)\n\n def load_pretrained_weights(self, pretrained_path=''):\n model_dict = self.state_dict()\n\n if not os.path.exists(pretrained_path):\n print(f'\\nFile \"{pretrained_path}\" does not exist.')\n print('You need to specify the correct path to the pre-trained weights.\\n'\n 'You can download the weights for HRNet from the repository:\\n'\n 'https://github.com/HRNet/HRNet-Image-Classification')\n exit(1)\n pretrained_dict = torch.load(pretrained_path, map_location={'cuda:0': 'cpu'})\n pretrained_dict = {k.replace('last_layer', 'aux_head').replace('model.', ''): v for k, v in\n pretrained_dict.items()}\n\n pretrained_dict = {k: v for k, v in pretrained_dict.items()\n if k in model_dict.keys()}\n\n model_dict.update(pretrained_dict)\n self.load_state_dict(model_dict)"
},
{
"identifier": "LRMult",
"path": "gui/ritm/model/modifiers.py",
"snippet": "class LRMult(object):\n def __init__(self, lr_mult=1.):\n self.lr_mult = lr_mult\n\n def __call__(self, m):\n if getattr(m, 'weight', None) is not None:\n m.weight.lr_mult = self.lr_mult\n if getattr(m, 'bias', None) is not None:\n m.bias.lr_mult = self.lr_mult"
}
] | import torch.nn as nn
from ..utils.serialization import serialize
from .is_model import ISModel
from .modeling.hrnet_ocr import HighResolutionNet
from ..model.modifiers import LRMult | 4,490 |
class HRNetModel(ISModel):
@serialize
def __init__(self,
width=48,
ocr_width=256,
small=False,
backbone_lr_mult=0.1,
norm_layer=nn.BatchNorm2d,
**kwargs):
super().__init__(norm_layer=norm_layer, **kwargs)
|
class HRNetModel(ISModel):
@serialize
def __init__(self,
width=48,
ocr_width=256,
small=False,
backbone_lr_mult=0.1,
norm_layer=nn.BatchNorm2d,
**kwargs):
super().__init__(norm_layer=norm_layer, **kwargs)
| self.feature_extractor = HighResolutionNet(width=width, | 2 | 2023-10-19 17:49:24+00:00 | 8k |
DeepGraphLearning/ULTRA | script/run_many.py | [
{
"identifier": "tasks",
"path": "ultra/tasks.py",
"snippet": "def edge_match(edge_index, query_index):\ndef negative_sampling(data, batch, num_negative, strict=True):\ndef all_negative(data, batch):\ndef strict_negative_mask(data, batch):\ndef compute_ranking(pred, target, mask=None):\ndef build_relation_graph(graph):"
},
{
"identifier": "util",
"path": "ultra/util.py",
"snippet": "def detect_variables(cfg_file):\ndef load_config(cfg_file, context=None):\ndef literal_eval(string):\ndef parse_args():\ndef get_root_logger(file=True):\ndef get_rank():\ndef get_world_size():\ndef synchronize():\ndef get_device(cfg):\ndef create_working_directory(cfg):\ndef build_dataset(cfg):"
},
{
"identifier": "Ultra",
"path": "ultra/models.py",
"snippet": "class Ultra(nn.Module):\n\n def __init__(self, rel_model_cfg, entity_model_cfg):\n # kept that because super Ultra sounds cool\n super(Ultra, self).__init__()\n\n self.relation_model = RelNBFNet(**rel_model_cfg)\n self.entity_model = EntityNBFNet(**entity_model_cfg)\n\n \n def forward(self, data, batch):\n \n # batch shape: (bs, 1+num_negs, 3)\n # relations are the same all positive and negative triples, so we can extract only one from the first triple among 1+nug_negs\n query_rels = batch[:, 0, 2]\n relation_representations = self.relation_model(data.relation_graph, query=query_rels)\n score = self.entity_model(data, relation_representations, batch)\n \n return score"
},
{
"identifier": "train_and_validate",
"path": "script/run.py",
"snippet": "def train_and_validate(cfg, model, train_data, valid_data, device, logger, filtered_data=None, batch_per_epoch=None):\n if cfg.train.num_epoch == 0:\n return\n\n world_size = util.get_world_size()\n rank = util.get_rank()\n\n train_triplets = torch.cat([train_data.target_edge_index, train_data.target_edge_type.unsqueeze(0)]).t()\n sampler = torch_data.DistributedSampler(train_triplets, world_size, rank)\n train_loader = torch_data.DataLoader(train_triplets, cfg.train.batch_size, sampler=sampler)\n\n batch_per_epoch = batch_per_epoch or len(train_loader)\n\n cls = cfg.optimizer.pop(\"class\")\n optimizer = getattr(optim, cls)(model.parameters(), **cfg.optimizer)\n num_params = sum(p.numel() for p in model.parameters())\n logger.warning(line)\n logger.warning(f\"Number of parameters: {num_params}\")\n\n if world_size > 1:\n parallel_model = nn.parallel.DistributedDataParallel(model, device_ids=[device])\n else:\n parallel_model = model\n\n step = math.ceil(cfg.train.num_epoch / 10)\n best_result = float(\"-inf\")\n best_epoch = -1\n\n batch_id = 0\n for i in range(0, cfg.train.num_epoch, step):\n parallel_model.train()\n for epoch in range(i, min(cfg.train.num_epoch, i + step)):\n if util.get_rank() == 0:\n logger.warning(separator)\n logger.warning(\"Epoch %d begin\" % epoch)\n\n losses = []\n sampler.set_epoch(epoch)\n for batch in train_loader:\n batch = tasks.negative_sampling(train_data, batch, cfg.task.num_negative,\n strict=cfg.task.strict_negative)\n pred = parallel_model(train_data, batch)\n target = torch.zeros_like(pred)\n target[:, 0] = 1\n loss = F.binary_cross_entropy_with_logits(pred, target, reduction=\"none\")\n neg_weight = torch.ones_like(pred)\n if cfg.task.adversarial_temperature > 0:\n with torch.no_grad():\n neg_weight[:, 1:] = F.softmax(pred[:, 1:] / cfg.task.adversarial_temperature, dim=-1)\n else:\n neg_weight[:, 1:] = 1 / cfg.task.num_negative\n loss = (loss * neg_weight).sum(dim=-1) / neg_weight.sum(dim=-1)\n loss = loss.mean()\n\n loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n\n if util.get_rank() == 0 and batch_id % cfg.train.log_interval == 0:\n logger.warning(separator)\n logger.warning(\"binary cross entropy: %g\" % loss)\n losses.append(loss.item())\n batch_id += 1\n\n if util.get_rank() == 0:\n avg_loss = sum(losses) / len(losses)\n logger.warning(separator)\n logger.warning(\"Epoch %d end\" % epoch)\n logger.warning(line)\n logger.warning(\"average binary cross entropy: %g\" % avg_loss)\n\n epoch = min(cfg.train.num_epoch, i + step)\n if rank == 0:\n logger.warning(\"Save checkpoint to model_epoch_%d.pth\" % epoch)\n state = {\n \"model\": model.state_dict(),\n \"optimizer\": optimizer.state_dict()\n }\n torch.save(state, \"model_epoch_%d.pth\" % epoch)\n util.synchronize()\n\n if rank == 0:\n logger.warning(separator)\n logger.warning(\"Evaluate on valid\")\n result = test(cfg, model, valid_data, filtered_data=filtered_data, device=device, logger=logger)\n if result > best_result:\n best_result = result\n best_epoch = epoch\n\n if rank == 0:\n logger.warning(\"Load checkpoint from model_epoch_%d.pth\" % best_epoch)\n state = torch.load(\"model_epoch_%d.pth\" % best_epoch, map_location=device)\n model.load_state_dict(state[\"model\"])\n util.synchronize()"
},
{
"identifier": "test",
"path": "script/run.py",
"snippet": "@torch.no_grad()\ndef test(cfg, model, test_data, device, logger, filtered_data=None, return_metrics=False):\n world_size = util.get_world_size()\n rank = util.get_rank()\n\n test_triplets = torch.cat([test_data.target_edge_index, test_data.target_edge_type.unsqueeze(0)]).t()\n sampler = torch_data.DistributedSampler(test_triplets, world_size, rank)\n test_loader = torch_data.DataLoader(test_triplets, cfg.train.batch_size, sampler=sampler)\n\n model.eval()\n rankings = []\n num_negatives = []\n tail_rankings, num_tail_negs = [], [] # for explicit tail-only evaluation needed for 5 datasets\n for batch in test_loader:\n t_batch, h_batch = tasks.all_negative(test_data, batch)\n t_pred = model(test_data, t_batch)\n h_pred = model(test_data, h_batch)\n\n if filtered_data is None:\n t_mask, h_mask = tasks.strict_negative_mask(test_data, batch)\n else:\n t_mask, h_mask = tasks.strict_negative_mask(filtered_data, batch)\n pos_h_index, pos_t_index, pos_r_index = batch.t()\n t_ranking = tasks.compute_ranking(t_pred, pos_t_index, t_mask)\n h_ranking = tasks.compute_ranking(h_pred, pos_h_index, h_mask)\n num_t_negative = t_mask.sum(dim=-1)\n num_h_negative = h_mask.sum(dim=-1)\n\n rankings += [t_ranking, h_ranking]\n num_negatives += [num_t_negative, num_h_negative]\n\n tail_rankings += [t_ranking]\n num_tail_negs += [num_t_negative]\n\n ranking = torch.cat(rankings)\n num_negative = torch.cat(num_negatives)\n all_size = torch.zeros(world_size, dtype=torch.long, device=device)\n all_size[rank] = len(ranking)\n\n # ugly repetitive code for tail-only ranks processing\n tail_ranking = torch.cat(tail_rankings)\n num_tail_neg = torch.cat(num_tail_negs)\n all_size_t = torch.zeros(world_size, dtype=torch.long, device=device)\n all_size_t[rank] = len(tail_ranking)\n if world_size > 1:\n dist.all_reduce(all_size, op=dist.ReduceOp.SUM)\n dist.all_reduce(all_size_t, op=dist.ReduceOp.SUM)\n\n # obtaining all ranks \n cum_size = all_size.cumsum(0)\n all_ranking = torch.zeros(all_size.sum(), dtype=torch.long, device=device)\n all_ranking[cum_size[rank] - all_size[rank]: cum_size[rank]] = ranking\n all_num_negative = torch.zeros(all_size.sum(), dtype=torch.long, device=device)\n all_num_negative[cum_size[rank] - all_size[rank]: cum_size[rank]] = num_negative\n\n # the same for tails-only ranks\n cum_size_t = all_size_t.cumsum(0)\n all_ranking_t = torch.zeros(all_size_t.sum(), dtype=torch.long, device=device)\n all_ranking_t[cum_size_t[rank] - all_size_t[rank]: cum_size_t[rank]] = tail_ranking\n all_num_negative_t = torch.zeros(all_size_t.sum(), dtype=torch.long, device=device)\n all_num_negative_t[cum_size_t[rank] - all_size_t[rank]: cum_size_t[rank]] = num_tail_neg\n if world_size > 1:\n dist.all_reduce(all_ranking, op=dist.ReduceOp.SUM)\n dist.all_reduce(all_num_negative, op=dist.ReduceOp.SUM)\n dist.all_reduce(all_ranking_t, op=dist.ReduceOp.SUM)\n dist.all_reduce(all_num_negative_t, op=dist.ReduceOp.SUM)\n\n metrics = {}\n if rank == 0:\n for metric in cfg.task.metric:\n if \"-tail\" in metric:\n _metric_name, direction = metric.split(\"-\")\n if direction != \"tail\":\n raise ValueError(\"Only tail metric is supported in this mode\")\n _ranking = all_ranking_t\n _num_neg = all_num_negative_t\n else:\n _ranking = all_ranking \n _num_neg = all_num_negative \n _metric_name = metric\n \n if _metric_name == \"mr\":\n score = _ranking.float().mean()\n elif _metric_name == \"mrr\":\n score = (1 / _ranking.float()).mean()\n elif _metric_name.startswith(\"hits@\"):\n values = _metric_name[5:].split(\"_\")\n threshold = int(values[0])\n if len(values) > 1:\n num_sample = int(values[1])\n # unbiased estimation\n fp_rate = (_ranking - 1).float() / _num_neg\n score = 0\n for i in range(threshold):\n # choose i false positive from num_sample - 1 negatives\n num_comb = math.factorial(num_sample - 1) / \\\n math.factorial(i) / math.factorial(num_sample - i - 1)\n score += num_comb * (fp_rate ** i) * ((1 - fp_rate) ** (num_sample - i - 1))\n score = score.mean()\n else:\n score = (_ranking <= threshold).float().mean()\n logger.warning(\"%s: %g\" % (metric, score))\n metrics[metric] = score\n mrr = (1 / all_ranking.float()).mean()\n\n return mrr if not return_metrics else metrics"
}
] | import os
import sys
import csv
import math
import time
import pprint
import argparse
import random
import torch
import torch_geometric as pyg
from torch import optim
from torch import nn
from torch.nn import functional as F
from torch import distributed as dist
from torch.utils import data as torch_data
from torch_geometric.data import Data
from ultra import tasks, util
from ultra.models import Ultra
from script.run import train_and_validate, test | 5,410 | torch.manual_seed(seed + util.get_rank())
torch.cuda.manual_seed(seed + util.get_rank())
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if __name__ == "__main__":
seeds = [1024, 42, 1337, 512, 256]
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", help="yaml configuration file", required=True)
parser.add_argument("-d", "--datasets", help="target datasets", default='FB15k237Inductive:v1,NELLInductive:v4', type=str, required=True)
parser.add_argument("-reps", "--repeats", help="number of times to repeat each exp", default=1, type=int)
parser.add_argument("-ft", "--finetune", help="finetune the checkpoint on the specified datasets", action='store_true')
parser.add_argument("-tr", "--train", help="train the model from scratch", action='store_true')
args, unparsed = parser.parse_known_args()
datasets = args.datasets.split(",")
path = os.path.dirname(os.path.expanduser(__file__))
results_file = os.path.join(path, f"ultra_results_{time.strftime('%Y-%m-%d-%H-%M-%S')}.csv")
for graph in datasets:
ds, version = graph.split(":") if ":" in graph else (graph, None)
for i in range(args.repeats):
seed = seeds[i] if i < len(seeds) else random.randint(0, 10000)
print(f"Running on {graph}, iteration {i+1} / {args.repeats}, seed: {seed}")
# get dynamic arguments defined in the config file
vars = util.detect_variables(args.config)
parser = argparse.ArgumentParser()
for var in vars:
parser.add_argument("--%s" % var)
vars = parser.parse_known_args(unparsed)[0]
vars = {k: util.literal_eval(v) for k, v in vars._get_kwargs()}
if args.finetune:
epochs, batch_per_epoch = default_finetuning_config[ds]
elif args.train:
epochs, batch_per_epoch = default_train_config[ds]
else:
epochs, batch_per_epoch = 0, 'null'
vars['epochs'] = epochs
vars['bpe'] = batch_per_epoch
vars['dataset'] = ds
if version is not None:
vars['version'] = version
cfg = util.load_config(args.config, context=vars)
root_dir = os.path.expanduser(cfg.output_dir) # resetting the path to avoid inf nesting
os.chdir(root_dir)
working_dir = util.create_working_directory(cfg)
set_seed(seed)
# args, vars = util.parse_args()
# cfg = util.load_config(args.config, context=vars)
# working_dir = util.create_working_directory(cfg)
# torch.manual_seed(args.seed + util.get_rank())
logger = util.get_root_logger()
if util.get_rank() == 0:
logger.warning("Random seed: %d" % seed)
logger.warning("Config file: %s" % args.config)
logger.warning(pprint.pformat(cfg))
task_name = cfg.task["name"]
dataset = util.build_dataset(cfg)
device = util.get_device(cfg)
train_data, valid_data, test_data = dataset[0], dataset[1], dataset[2]
train_data = train_data.to(device)
valid_data = valid_data.to(device)
test_data = test_data.to(device)
model = Ultra(
rel_model_cfg=cfg.model.relation_model,
entity_model_cfg=cfg.model.entity_model,
)
if "checkpoint" in cfg and cfg.checkpoint is not None:
state = torch.load(cfg.checkpoint, map_location="cpu")
model.load_state_dict(state["model"])
#model = pyg.compile(model, dynamic=True)
model = model.to(device)
if task_name == "InductiveInference":
# filtering for inductive datasets
# Grail, MTDEA, HM datasets have validation sets based off the training graph
# ILPC, Ingram have validation sets from the inference graph
# filtering dataset should contain all true edges (base graph + (valid) + test)
if "ILPC" in cfg.dataset['class'] or "Ingram" in cfg.dataset['class']:
# add inference, valid, test as the validation and test filtering graphs
full_inference_edges = torch.cat([valid_data.edge_index, valid_data.target_edge_index, test_data.target_edge_index], dim=1)
full_inference_etypes = torch.cat([valid_data.edge_type, valid_data.target_edge_type, test_data.target_edge_type])
test_filtered_data = Data(edge_index=full_inference_edges, edge_type=full_inference_etypes, num_nodes=test_data.num_nodes)
val_filtered_data = test_filtered_data
else:
# test filtering graph: inference edges + test edges
full_inference_edges = torch.cat([test_data.edge_index, test_data.target_edge_index], dim=1)
full_inference_etypes = torch.cat([test_data.edge_type, test_data.target_edge_type])
test_filtered_data = Data(edge_index=full_inference_edges, edge_type=full_inference_etypes, num_nodes=test_data.num_nodes)
# validation filtering graph: train edges + validation edges
val_filtered_data = Data(
edge_index=torch.cat([train_data.edge_index, valid_data.target_edge_index], dim=1),
edge_type=torch.cat([train_data.edge_type, valid_data.target_edge_type])
)
#test_filtered_data = val_filtered_data = None
else:
# for transductive setting, use the whole graph for filtered ranking
filtered_data = Data(edge_index=dataset._data.target_edge_index, edge_type=dataset._data.target_edge_type, num_nodes=dataset[0].num_nodes)
val_filtered_data = test_filtered_data = filtered_data
val_filtered_data = val_filtered_data.to(device)
test_filtered_data = test_filtered_data.to(device)
train_and_validate(cfg, model, train_data, valid_data, filtered_data=val_filtered_data, device=device, logger=logger)
if util.get_rank() == 0:
logger.warning(separator)
logger.warning("Evaluate on valid")
|
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
default_finetuning_config = {
# graph: (num_epochs, batches_per_epoch), null means all triples in train set
# transductive datasets (17)
# standard ones (10)
"CoDExSmall": (1, 4000),
"CoDExMedium": (1, 4000),
"CoDExLarge": (1, 2000),
"FB15k237": (1, 'null'),
"WN18RR": (1, 'null'),
"YAGO310": (1, 2000),
"DBpedia100k": (1, 1000),
"AristoV4": (1, 2000),
"ConceptNet100k": (1, 2000),
"ATOMIC": (1, 200),
# tail-only datasets (2)
"NELL995": (1, 'null'), # not implemented yet
"Hetionet": (1, 4000),
# sparse datasets (5)
"WDsinger": (3, 'null'),
"FB15k237_10": (1, 'null'),
"FB15k237_20": (1, 'null'),
"FB15k237_50": (1, 1000),
"NELL23k": (3, 'null'),
# inductive datasets (42)
# GraIL datasets (12)
"FB15k237Inductive": (1, 'null'), # for all 4 datasets
"WN18RRInductive": (1, 'null'), # for all 4 datasets
"NELLInductive": (3, 'null'), # for all 4 datasets
# ILPC (2)
"ILPC2022SmallInductive": (3, 'null'),
"ILPC2022LargeInductive": (1, 1000),
# Ingram datasets (13)
"NLIngram": (3, 'null'), # for all 5 datasets
"FBIngram": (3, 'null'), # for all 4 datasets
"WKIngram": (3, 'null'), # for all 4 datasets
# MTDEA datasets (10)
"WikiTopicsMT1": (3, 'null'), # for all 2 test datasets
"WikiTopicsMT2": (3, 'null'), # for all 2 test datasets
"WikiTopicsMT3": (3, 'null'), # for all 2 test datasets
"WikiTopicsMT4": (3, 'null'), # for all 2 test datasets
"Metafam": (3, 'null'),
"FBNELL": (3, 'null'),
# Hamaguchi datasets (4)
"HM": (1, 100) # for all 4 datasets
}
default_train_config = {
# graph: (num_epochs, batches_per_epoch), null means all triples in train set
# transductive datasets (17)
# standard ones (10)
"CoDExSmall": (10, 1000),
"CoDExMedium": (10, 1000),
"CoDExLarge": (10, 1000),
"FB15k237": (10, 1000),
"WN18RR": (10, 1000),
"YAGO310": (10, 2000),
"DBpedia100k": (10, 1000),
"AristoV4": (10, 1000),
"ConceptNet100k": (10, 1000),
"ATOMIC": (10, 1000),
# tail-only datasets (2)
"NELL995": (10, 1000), # not implemented yet
"Hetionet": (10, 1000),
# sparse datasets (5)
"WDsinger": (10, 1000),
"FB15k237_10": (10, 1000),
"FB15k237_20": (10, 1000),
"FB15k237_50": (10, 1000),
"NELL23k": (10, 1000),
# inductive datasets (42)
# GraIL datasets (12)
"FB15k237Inductive": (10, 'null'), # for all 4 datasets
"WN18RRInductive": (10, 'null'), # for all 4 datasets
"NELLInductive": (10, 'null'), # for all 4 datasets
# ILPC (2)
"ILPC2022SmallInductive": (10, 'null'),
"ILPC2022LargeInductive": (10, 1000),
# Ingram datasets (13)
"NLIngram": (10, 'null'), # for all 5 datasets
"FBIngram": (10, 'null'), # for all 4 datasets
"WKIngram": (10, 'null'), # for all 4 datasets
# MTDEA datasets (10)
"WikiTopicsMT1": (10, 'null'), # for all 2 test datasets
"WikiTopicsMT2": (10, 'null'), # for all 2 test datasets
"WikiTopicsMT3": (10, 'null'), # for all 2 test datasets
"WikiTopicsMT4": (10, 'null'), # for all 2 test datasets
"Metafam": (10, 'null'),
"FBNELL": (10, 'null'),
# Hamaguchi datasets (4)
"HM": (10, 1000) # for all 4 datasets
}
separator = ">" * 30
line = "-" * 30
def set_seed(seed):
random.seed(seed + util.get_rank())
# np.random.seed(seed + util.get_rank())
torch.manual_seed(seed + util.get_rank())
torch.cuda.manual_seed(seed + util.get_rank())
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if __name__ == "__main__":
seeds = [1024, 42, 1337, 512, 256]
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", help="yaml configuration file", required=True)
parser.add_argument("-d", "--datasets", help="target datasets", default='FB15k237Inductive:v1,NELLInductive:v4', type=str, required=True)
parser.add_argument("-reps", "--repeats", help="number of times to repeat each exp", default=1, type=int)
parser.add_argument("-ft", "--finetune", help="finetune the checkpoint on the specified datasets", action='store_true')
parser.add_argument("-tr", "--train", help="train the model from scratch", action='store_true')
args, unparsed = parser.parse_known_args()
datasets = args.datasets.split(",")
path = os.path.dirname(os.path.expanduser(__file__))
results_file = os.path.join(path, f"ultra_results_{time.strftime('%Y-%m-%d-%H-%M-%S')}.csv")
for graph in datasets:
ds, version = graph.split(":") if ":" in graph else (graph, None)
for i in range(args.repeats):
seed = seeds[i] if i < len(seeds) else random.randint(0, 10000)
print(f"Running on {graph}, iteration {i+1} / {args.repeats}, seed: {seed}")
# get dynamic arguments defined in the config file
vars = util.detect_variables(args.config)
parser = argparse.ArgumentParser()
for var in vars:
parser.add_argument("--%s" % var)
vars = parser.parse_known_args(unparsed)[0]
vars = {k: util.literal_eval(v) for k, v in vars._get_kwargs()}
if args.finetune:
epochs, batch_per_epoch = default_finetuning_config[ds]
elif args.train:
epochs, batch_per_epoch = default_train_config[ds]
else:
epochs, batch_per_epoch = 0, 'null'
vars['epochs'] = epochs
vars['bpe'] = batch_per_epoch
vars['dataset'] = ds
if version is not None:
vars['version'] = version
cfg = util.load_config(args.config, context=vars)
root_dir = os.path.expanduser(cfg.output_dir) # resetting the path to avoid inf nesting
os.chdir(root_dir)
working_dir = util.create_working_directory(cfg)
set_seed(seed)
# args, vars = util.parse_args()
# cfg = util.load_config(args.config, context=vars)
# working_dir = util.create_working_directory(cfg)
# torch.manual_seed(args.seed + util.get_rank())
logger = util.get_root_logger()
if util.get_rank() == 0:
logger.warning("Random seed: %d" % seed)
logger.warning("Config file: %s" % args.config)
logger.warning(pprint.pformat(cfg))
task_name = cfg.task["name"]
dataset = util.build_dataset(cfg)
device = util.get_device(cfg)
train_data, valid_data, test_data = dataset[0], dataset[1], dataset[2]
train_data = train_data.to(device)
valid_data = valid_data.to(device)
test_data = test_data.to(device)
model = Ultra(
rel_model_cfg=cfg.model.relation_model,
entity_model_cfg=cfg.model.entity_model,
)
if "checkpoint" in cfg and cfg.checkpoint is not None:
state = torch.load(cfg.checkpoint, map_location="cpu")
model.load_state_dict(state["model"])
#model = pyg.compile(model, dynamic=True)
model = model.to(device)
if task_name == "InductiveInference":
# filtering for inductive datasets
# Grail, MTDEA, HM datasets have validation sets based off the training graph
# ILPC, Ingram have validation sets from the inference graph
# filtering dataset should contain all true edges (base graph + (valid) + test)
if "ILPC" in cfg.dataset['class'] or "Ingram" in cfg.dataset['class']:
# add inference, valid, test as the validation and test filtering graphs
full_inference_edges = torch.cat([valid_data.edge_index, valid_data.target_edge_index, test_data.target_edge_index], dim=1)
full_inference_etypes = torch.cat([valid_data.edge_type, valid_data.target_edge_type, test_data.target_edge_type])
test_filtered_data = Data(edge_index=full_inference_edges, edge_type=full_inference_etypes, num_nodes=test_data.num_nodes)
val_filtered_data = test_filtered_data
else:
# test filtering graph: inference edges + test edges
full_inference_edges = torch.cat([test_data.edge_index, test_data.target_edge_index], dim=1)
full_inference_etypes = torch.cat([test_data.edge_type, test_data.target_edge_type])
test_filtered_data = Data(edge_index=full_inference_edges, edge_type=full_inference_etypes, num_nodes=test_data.num_nodes)
# validation filtering graph: train edges + validation edges
val_filtered_data = Data(
edge_index=torch.cat([train_data.edge_index, valid_data.target_edge_index], dim=1),
edge_type=torch.cat([train_data.edge_type, valid_data.target_edge_type])
)
#test_filtered_data = val_filtered_data = None
else:
# for transductive setting, use the whole graph for filtered ranking
filtered_data = Data(edge_index=dataset._data.target_edge_index, edge_type=dataset._data.target_edge_type, num_nodes=dataset[0].num_nodes)
val_filtered_data = test_filtered_data = filtered_data
val_filtered_data = val_filtered_data.to(device)
test_filtered_data = test_filtered_data.to(device)
train_and_validate(cfg, model, train_data, valid_data, filtered_data=val_filtered_data, device=device, logger=logger)
if util.get_rank() == 0:
logger.warning(separator)
logger.warning("Evaluate on valid") | test(cfg, model, valid_data, filtered_data=val_filtered_data, device=device, logger=logger) | 4 | 2023-10-23 17:06:10+00:00 | 8k |
ZhengyiLuo/PerpetualHumanoidControl | phc/run.py | [
{
"identifier": "set_np_formatting",
"path": "phc/utils/config.py",
"snippet": "def set_np_formatting():\n np.set_printoptions(edgeitems=30, infstr='inf', linewidth=4000, nanstr='nan', precision=2, suppress=False, threshold=10000, formatter=None)"
},
{
"identifier": "set_seed",
"path": "phc/utils/config.py",
"snippet": "def set_seed(seed, torch_deterministic=False):\n print(\"torch_deterministic:\", torch_deterministic)\n print(\"torch_deterministic:\", torch_deterministic)\n print(\"torch_deterministic:\", torch_deterministic)\n if seed == -1 and torch_deterministic:\n seed = 42\n elif seed == -1:\n seed = np.random.randint(0, 10000)\n print(\"Setting seed: {}\".format(seed))\n\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n if torch_deterministic:\n # refer to https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility\n os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n torch.use_deterministic_algorithms(True)\n else:\n torch.backends.cudnn.benchmark = True\n torch.backends.cudnn.deterministic = False\n\n return seed"
},
{
"identifier": "get_args",
"path": "phc/utils/config.py",
"snippet": "def get_args(benchmark=False):\n custom_parameters = [\n {\n \"name\": \"--test\",\n \"action\": \"store_true\",\n \"default\": False,\n \"help\": \"Run trained policy, no training\"\n },\n {\n \"name\": \"--debug\",\n \"action\": \"store_true\",\n \"default\": False,\n \"help\": \"Debugging, no training and no logging\"\n },\n {\n \"name\": \"--play\",\n \"action\": \"store_true\",\n \"default\": False,\n \"help\": \"Run trained policy, the same as test, can be used only by rl_games RL library\"\n },\n {\n \"name\": \"--epoch\",\n \"type\": int,\n \"default\": 0,\n \"help\": \"Resume training or start testing from a checkpoint\"\n },\n {\n \"name\": \"--checkpoint\",\n \"type\": str,\n \"default\": \"Base\",\n \"help\": \"Path to the saved weights, only for rl_games RL library\"\n },\n {\n \"name\": \"--headless\",\n \"action\": \"store_true\",\n \"default\": False,\n \"help\": \"Force display off at all times\"\n },\n {\n \"name\": \"--horovod\",\n \"action\": \"store_true\",\n \"default\": False,\n \"help\": \"Use horovod for multi-gpu training, have effect only with rl_games RL library\"\n },\n {\n \"name\": \"--task\",\n \"type\": str,\n \"default\": \"Humanoid\",\n \"help\": \"Can be BallBalance, Cartpole, CartpoleYUp, Ant, Humanoid, Anymal, FrankaCabinet, Quadcopter, ShadowHand, Ingenuity\"\n },\n {\n \"name\": \"--task_type\",\n \"type\": str,\n \"default\": \"Python\",\n \"help\": \"Choose Python or C++\"\n },\n {\n \"name\": \"--rl_device\",\n \"type\": str,\n \"default\": \"cuda:0\",\n \"help\": \"Choose CPU or GPU device for inferencing policy network\"\n },\n {\n \"name\": \"--logdir\",\n \"type\": str,\n \"default\": \"logs/\"\n },\n {\n \"name\": \"--experiment\",\n \"type\": str,\n \"default\": \"Base\",\n \"help\": \"Experiment name. If used with --metadata flag an additional information about physics engine, sim device, pipeline and domain randomization will be added to the name\"\n },\n {\n \"name\": \"--metadata\",\n \"action\": \"store_true\",\n \"default\": False,\n \"help\": \"Requires --experiment flag, adds physics engine, sim device, pipeline info and if domain randomization is used to the experiment name provided by user\"\n },\n {\n \"name\": \"--cfg_env\",\n \"type\": str,\n \"default\": \"Base\",\n \"help\": \"Environment configuration file (.yaml)\"\n },\n {\n \"name\": \"--cfg_train\",\n \"type\": str,\n \"default\": \"Base\",\n \"help\": \"Training configuration file (.yaml)\"\n },\n {\n \"name\": \"--motion_file\",\n \"type\": str,\n \"default\": \"\",\n \"help\": \"Specify reference motion file\"\n },\n {\n \"name\": \"--num_envs\",\n \"type\": int,\n \"default\": 0,\n \"help\": \"Number of environments to create - override config file\"\n },\n {\n \"name\": \"--episode_length\",\n \"type\": int,\n \"default\": 0,\n \"help\": \"Episode length, by default is read from yaml config\"\n },\n {\n \"name\": \"--seed\",\n \"type\": int,\n \"help\": \"Random seed\"\n },\n {\n \"name\": \"--max_iterations\",\n \"type\": int,\n \"default\": 0,\n \"help\": \"Set a maximum number of training iterations\"\n },\n {\n \"name\": \"--horizon_length\",\n \"type\": int,\n \"default\": -1,\n \"help\": \"Set number of simulation steps per 1 PPO iteration. Supported only by rl_games. If not -1 overrides the config settings.\"\n },\n {\n \"name\": \"--minibatch_size\",\n \"type\": int,\n \"default\": -1,\n \"help\": \"Set batch size for PPO optimization step. Supported only by rl_games. If not -1 overrides the config settings.\"\n },\n {\n \"name\": \"--randomize\",\n \"action\": \"store_true\",\n \"default\": False,\n \"help\": \"Apply physics domain randomization\"\n },\n {\n \"name\": \"--torch_deterministic\",\n \"action\": \"store_true\",\n \"default\": False,\n \"help\": \"Apply additional PyTorch settings for more deterministic behaviour\"\n },\n {\n \"name\": \"--network_path\",\n \"type\": str,\n \"default\": \"output/\",\n \"help\": \"Specify network output directory\"\n },\n {\n \"name\": \"--log_path\",\n \"type\": str,\n \"default\": \"log/\",\n \"help\": \"Specify log directory\"\n },\n {\n \"name\": \"--llc_checkpoint\",\n \"type\": str,\n \"default\": \"\",\n \"help\": \"Path to the saved weights for the low-level controller of an HRL agent.\"\n },\n {\n \"name\": \"--no_log\",\n \"action\": \"store_true\",\n \"default\": False,\n \"help\": \"No wandb logging\"\n },\n {\n \"name\": \"--resume_str\",\n \"type\": str,\n \"default\": None,\n \"help\": \"Resuming training from a specific logging instance\"\n },\n {\n \"name\": \"--follow\",\n \"action\": \"store_true\",\n \"default\": False,\n \"help\": \"Follow Humanoid\"\n },\n {\n \"name\": \"--real_traj\",\n \"action\": \"store_true\",\n \"default\": False,\n \"help\": \"load real_traj\"\n },\n {\n \"name\": \"--show_sensors\",\n \"action\": \"store_true\",\n \"default\": False,\n \"help\": \"load real data mesh\"\n },\n {\n \"name\": \"--small_terrain\",\n \"action\": \"store_true\",\n \"default\": False,\n \"help\": \"load real data mesh\"\n },\n {\n \"name\": \"--server_mode\",\n \"action\": \"store_true\",\n \"default\": False,\n \"help\": \"load real data mesh\"\n },\n {\n \"name\": \"--add_proj\",\n \"action\": \"store_true\",\n \"default\": False,\n \"help\": \"adding small projectiiles or not\"\n },\n {\n \"name\": \"--im_eval\",\n \"action\": \"store_true\",\n \"default\": False,\n \"help\": \"Eval imitation\"\n },\n {\n \"name\": \"--has_eval\",\n \"action\": \"store_true\",\n \"default\": False,\n \"help\": \"Eval during training or not\"\n },\n {\n \"name\": \"--no_virtual_display\",\n \"action\": \"store_true\",\n \"default\": False,\n \"help\": \"Disable virtual display\"\n },\n {\n \"name\": \"--render_o3d\",\n \"action\": \"store_true\",\n \"default\": False,\n \"help\": \"Disable virtual display\"\n },\n \n {\n \"name\": \"--demo\",\n \"action\": \"store_true\",\n \"default\": False,\n \"help\": \"No SMPL_robot dependency\"\n },\n ]\n\n if benchmark:\n custom_parameters += [{\n \"name\": \"--num_proc\",\n \"type\": int,\n \"default\": 1,\n \"help\": \"Number of child processes to launch\"\n }, {\n \"name\": \"--random_actions\",\n \"action\": \"store_true\",\n \"help\": \"Run benchmark with random actions instead of inferencing\"\n }, {\n \"name\": \"--bench_len\",\n \"type\": int,\n \"default\": 10,\n \"help\": \"Number of timing reports\"\n }, {\n \"name\": \"--bench_file\",\n \"action\": \"store\",\n \"help\": \"Filename to store benchmark results\"\n }]\n\n # parse arguments\n args = gymutil.parse_arguments(description=\"RL Policy\", custom_parameters=custom_parameters)\n\n # allignment with examples\n args.device_id = args.compute_device_id\n args.device = args.sim_device_type if args.use_gpu_pipeline else 'cpu'\n\n if args.test:\n args.play = args.test\n args.train = False\n elif args.play:\n args.train = False\n else:\n args.train = True\n\n return args"
},
{
"identifier": "parse_sim_params",
"path": "phc/utils/config.py",
"snippet": "def parse_sim_params(args, cfg, cfg_train):\n # initialize sim\n sim_params = gymapi.SimParams()\n sim_params.dt = SIM_TIMESTEP\n sim_params.num_client_threads = args.slices\n\n if args.physics_engine == gymapi.SIM_FLEX:\n if args.device != \"cpu\":\n print(\"WARNING: Using Flex with GPU instead of PHYSX!\")\n sim_params.flex.shape_collision_margin = 0.01\n sim_params.flex.num_outer_iterations = 4\n sim_params.flex.num_inner_iterations = 10\n elif args.physics_engine == gymapi.SIM_PHYSX:\n sim_params.physx.solver_type = 1\n sim_params.physx.num_position_iterations = 4\n sim_params.physx.num_velocity_iterations = 1\n sim_params.physx.num_threads = 4\n sim_params.physx.use_gpu = args.use_gpu\n sim_params.physx.num_subscenes = args.subscenes\n if flags.test and not flags.im_eval:\n sim_params.physx.max_gpu_contact_pairs = 4 * 1024 * 1024\n else:\n sim_params.physx.max_gpu_contact_pairs = 16 * 1024 * 1024\n\n sim_params.use_gpu_pipeline = args.use_gpu_pipeline\n sim_params.physx.use_gpu = args.use_gpu\n\n # if sim options are provided in cfg, parse them and update/override above:\n if \"sim\" in cfg:\n gymutil.parse_sim_config(cfg[\"sim\"], sim_params)\n\n # Override num_threads if passed on the command line\n if args.physics_engine == gymapi.SIM_PHYSX and args.num_threads > 0:\n sim_params.physx.num_threads = args.num_threads\n\n return sim_params"
},
{
"identifier": "load_cfg",
"path": "phc/utils/config.py",
"snippet": "def load_cfg(args):\n with open(os.path.join(os.getcwd(), args.cfg_train), 'r') as f:\n cfg_train = yaml.load(f, Loader=yaml.SafeLoader)\n\n with open(os.path.join(os.getcwd(), args.cfg_env), 'r') as f:\n cfg = yaml.load(f, Loader=yaml.SafeLoader)\n\n # Override number of environments if passed on the command line\n if args.num_envs > 0:\n cfg[\"env\"][\"numEnvs\"] = args.num_envs\n\n if args.episode_length > 0:\n cfg[\"env\"][\"episodeLength\"] = args.episode_length\n\n cfg[\"name\"] = args.task\n cfg[\"headless\"] = args.headless\n\n # Set physics domain randomization\n if \"task\" in cfg:\n if \"randomize\" not in cfg[\"task\"]:\n cfg[\"task\"][\"randomize\"] = args.randomize\n else:\n cfg[\"task\"][\"randomize\"] = args.randomize or cfg[\"task\"][\"randomize\"]\n else:\n cfg[\"task\"] = {\"randomize\": False}\n\n logdir = args.logdir\n # Set deterministic mode\n if args.torch_deterministic:\n cfg_train[\"params\"][\"torch_deterministic\"] = True\n\n exp_name = cfg_train[\"params\"][\"config\"]['name']\n\n if args.experiment != 'Base':\n if args.metadata:\n exp_name = \"{}_{}_{}_{}\".format(args.experiment, args.task_type, args.device, str(args.physics_engine).split(\"_\")[-1])\n\n if cfg[\"task\"][\"randomize\"]:\n exp_name += \"_DR\"\n else:\n exp_name = args.experiment\n\n # Override config name\n cfg_train[\"params\"][\"config\"]['name'] = exp_name\n\n if args.epoch > 0:\n cfg_train[\"params\"][\"load_checkpoint\"] = True\n cfg_train[\"params\"][\"load_path\"] = osp.join(args.network_path, exp_name + \"_\" + str(args.epoch).zfill(8) + '.pth')\n args.checkpoint = cfg_train[\"params\"][\"load_path\"]\n elif args.epoch == -1:\n path = osp.join(args.network_path, exp_name + '.pth')\n if osp.exists(path):\n cfg_train[\"params\"][\"load_path\"] = path\n cfg_train[\"params\"][\"load_checkpoint\"] = True\n args.checkpoint = cfg_train[\"params\"][\"load_path\"]\n else:\n print(\"no file to resume!!!!\")\n \n\n # if args.checkpoint != \"Base\":\n # cfg_train[\"params\"][\"load_path\"] = osp.join(args.network_path, exp_name + \"_\" + str(args.epoch).zfill(8) + '.pth')\n\n if args.llc_checkpoint != \"\":\n cfg_train[\"params\"][\"config\"][\"llc_checkpoint\"] = args.llc_checkpoint\n\n # Set maximum number of training iterations (epochs)\n if args.max_iterations > 0:\n cfg_train[\"params\"][\"config\"]['max_epochs'] = args.max_iterations\n\n cfg_train[\"params\"][\"config\"][\"num_actors\"] = cfg[\"env\"][\"numEnvs\"]\n\n seed = cfg_train[\"params\"].get(\"seed\", -1)\n if args.seed is not None:\n seed = args.seed\n cfg[\"seed\"] = seed\n cfg_train[\"params\"][\"seed\"] = seed\n\n cfg[\"args\"] = args\n\n return cfg, cfg_train, logdir"
},
{
"identifier": "parse_task",
"path": "phc/utils/parse_task.py",
"snippet": "def parse_task(args, cfg, cfg_train, sim_params):\n\n # create native task and pass custom config\n device_id = args.device_id\n rl_device = args.rl_device\n\n cfg[\"seed\"] = cfg_train.get(\"seed\", -1)\n cfg_task = cfg[\"env\"]\n cfg_task[\"seed\"] = cfg[\"seed\"]\n\n task = eval(args.task)(cfg=cfg, sim_params=sim_params, physics_engine=args.physics_engine, device_type=args.device, device_id=device_id, headless=args.headless)\n env = VecTaskPythonWrapper(task, rl_device, cfg_train.get(\"clip_observations\", np.inf))\n\n return task, env"
},
{
"identifier": "flags",
"path": "phc/utils/flags.py",
"snippet": "class Flags(object):\n def __init__(self, items):"
}
] | import glob
import os
import sys
import pdb
import os.path as osp
import numpy as np
import copy
import torch
import wandb
import horovod.torch as hvd
from phc.utils.config import set_np_formatting, set_seed, get_args, parse_sim_params, load_cfg
from phc.utils.parse_task import parse_task
from rl_games.algos_torch import players
from rl_games.algos_torch import torch_ext
from rl_games.common import env_configurations, experiment, vecenv
from rl_games.common.algo_observer import AlgoObserver
from rl_games.torch_runner import Runner
from phc.utils.flags import flags
from learning import im_amp
from learning import im_amp_players
from learning import amp_agent
from learning import amp_players
from learning import amp_models
from learning import amp_network_builder
from learning import amp_network_mcp_builder
from learning import amp_network_pnn_builder
from env.tasks import humanoid_amp_task | 5,895 | return
def after_init(self, algo):
self.algo = algo
self.consecutive_successes = torch_ext.AverageMeter(1, self.algo.games_to_track).to(self.algo.ppo_device)
self.writer = self.algo.writer
return
def process_infos(self, infos, done_indices):
if isinstance(infos, dict):
if (self.use_successes == False) and 'consecutive_successes' in infos:
cons_successes = infos['consecutive_successes'].clone()
self.consecutive_successes.update(cons_successes.to(self.algo.ppo_device))
if self.use_successes and 'successes' in infos:
successes = infos['successes'].clone()
self.consecutive_successes.update(successes[done_indices].to(self.algo.ppo_device))
return
def after_clear_stats(self):
self.mean_scores.clear()
return
def after_print_stats(self, frame, epoch_num, total_time):
if self.consecutive_successes.current_size > 0:
mean_con_successes = self.consecutive_successes.get_mean()
self.writer.add_scalar('successes/consecutive_successes/mean', mean_con_successes, frame)
self.writer.add_scalar('successes/consecutive_successes/iter', mean_con_successes, epoch_num)
self.writer.add_scalar('successes/consecutive_successes/time', mean_con_successes, total_time)
return
class RLGPUEnv(vecenv.IVecEnv):
def __init__(self, config_name, num_actors, **kwargs):
self.env = env_configurations.configurations[config_name]['env_creator'](**kwargs)
self.use_global_obs = (self.env.num_states > 0)
self.full_state = {}
self.full_state["obs"] = self.reset()
if self.use_global_obs:
self.full_state["states"] = self.env.get_state()
return
def step(self, action):
next_obs, reward, is_done, info = self.env.step(action)
# todo: improve, return only dictinary
self.full_state["obs"] = next_obs
if self.use_global_obs:
self.full_state["states"] = self.env.get_state()
return self.full_state, reward, is_done, info
else:
return self.full_state["obs"], reward, is_done, info
def reset(self, env_ids=None):
self.full_state["obs"] = self.env.reset(env_ids)
if self.use_global_obs:
self.full_state["states"] = self.env.get_state()
return self.full_state
else:
return self.full_state["obs"]
def get_number_of_agents(self):
return self.env.get_number_of_agents()
def get_env_info(self):
info = {}
info['action_space'] = self.env.action_space
info['observation_space'] = self.env.observation_space
info['amp_observation_space'] = self.env.amp_observation_space
info['enc_amp_observation_space'] = self.env.enc_amp_observation_space
if isinstance(self.env.task, humanoid_amp_task.HumanoidAMPTask):
info['task_obs_size'] = self.env.task.get_task_obs_size()
else:
info['task_obs_size'] = 0
if self.use_global_obs:
info['state_space'] = self.env.state_space
print(info['action_space'], info['observation_space'], info['state_space'])
else:
print(info['action_space'], info['observation_space'])
return info
vecenv.register('RLGPU', lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs))
env_configurations.register('rlgpu', {'env_creator': lambda **kwargs: create_rlgpu_env(**kwargs), 'vecenv_type': 'RLGPU'})
def build_alg_runner(algo_observer):
runner = Runner(algo_observer)
runner.player_factory.register_builder('amp_discrete', lambda **kwargs: amp_players.AMPPlayerDiscrete(**kwargs))
runner.algo_factory.register_builder('amp', lambda **kwargs: amp_agent.AMPAgent(**kwargs))
runner.player_factory.register_builder('amp', lambda **kwargs: amp_players.AMPPlayerContinuous(**kwargs))
runner.model_builder.model_factory.register_builder('amp', lambda network, **kwargs: amp_models.ModelAMPContinuous(network))
runner.model_builder.network_factory.register_builder('amp', lambda **kwargs: amp_network_builder.AMPBuilder())
runner.model_builder.network_factory.register_builder('amp_mcp', lambda **kwargs: amp_network_mcp_builder.AMPMCPBuilder())
runner.model_builder.network_factory.register_builder('amp_pnn', lambda **kwargs: amp_network_pnn_builder.AMPPNNBuilder())
runner.algo_factory.register_builder('im_amp', lambda **kwargs: im_amp.IMAmpAgent(**kwargs))
runner.player_factory.register_builder('im_amp', lambda **kwargs: im_amp_players.IMAMPPlayerContinuous(**kwargs))
return runner
def main():
global args
global cfg
global cfg_train
set_np_formatting()
args = get_args()
cfg_env_name = args.cfg_env.split("/")[-1].split(".")[0]
args.logdir = args.network_path
cfg, cfg_train, logdir = load_cfg(args)
| # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
sys.path.append(os.getcwd())
args = None
cfg = None
cfg_train = None
def create_rlgpu_env(**kwargs):
use_horovod = cfg_train['params']['config'].get('multi_gpu', False)
if use_horovod:
rank = hvd.rank()
print("Horovod rank: ", rank)
cfg_train['params']['seed'] = cfg_train['params']['seed'] + rank
args.device = 'cuda'
args.device_id = rank
args.rl_device = 'cuda:' + str(rank)
cfg['rank'] = rank
cfg['rl_device'] = 'cuda:' + str(rank)
sim_params = parse_sim_params(args, cfg, cfg_train)
task, env = parse_task(args, cfg, cfg_train, sim_params)
print(env.num_envs)
print(env.num_actions)
print(env.num_obs)
print(env.num_states)
frames = kwargs.pop('frames', 1)
if frames > 1:
env = wrappers.FrameStack(env, frames, False)
return env
class RLGPUAlgoObserver(AlgoObserver):
def __init__(self, use_successes=True):
self.use_successes = use_successes
return
def after_init(self, algo):
self.algo = algo
self.consecutive_successes = torch_ext.AverageMeter(1, self.algo.games_to_track).to(self.algo.ppo_device)
self.writer = self.algo.writer
return
def process_infos(self, infos, done_indices):
if isinstance(infos, dict):
if (self.use_successes == False) and 'consecutive_successes' in infos:
cons_successes = infos['consecutive_successes'].clone()
self.consecutive_successes.update(cons_successes.to(self.algo.ppo_device))
if self.use_successes and 'successes' in infos:
successes = infos['successes'].clone()
self.consecutive_successes.update(successes[done_indices].to(self.algo.ppo_device))
return
def after_clear_stats(self):
self.mean_scores.clear()
return
def after_print_stats(self, frame, epoch_num, total_time):
if self.consecutive_successes.current_size > 0:
mean_con_successes = self.consecutive_successes.get_mean()
self.writer.add_scalar('successes/consecutive_successes/mean', mean_con_successes, frame)
self.writer.add_scalar('successes/consecutive_successes/iter', mean_con_successes, epoch_num)
self.writer.add_scalar('successes/consecutive_successes/time', mean_con_successes, total_time)
return
class RLGPUEnv(vecenv.IVecEnv):
def __init__(self, config_name, num_actors, **kwargs):
self.env = env_configurations.configurations[config_name]['env_creator'](**kwargs)
self.use_global_obs = (self.env.num_states > 0)
self.full_state = {}
self.full_state["obs"] = self.reset()
if self.use_global_obs:
self.full_state["states"] = self.env.get_state()
return
def step(self, action):
next_obs, reward, is_done, info = self.env.step(action)
# todo: improve, return only dictinary
self.full_state["obs"] = next_obs
if self.use_global_obs:
self.full_state["states"] = self.env.get_state()
return self.full_state, reward, is_done, info
else:
return self.full_state["obs"], reward, is_done, info
def reset(self, env_ids=None):
self.full_state["obs"] = self.env.reset(env_ids)
if self.use_global_obs:
self.full_state["states"] = self.env.get_state()
return self.full_state
else:
return self.full_state["obs"]
def get_number_of_agents(self):
return self.env.get_number_of_agents()
def get_env_info(self):
info = {}
info['action_space'] = self.env.action_space
info['observation_space'] = self.env.observation_space
info['amp_observation_space'] = self.env.amp_observation_space
info['enc_amp_observation_space'] = self.env.enc_amp_observation_space
if isinstance(self.env.task, humanoid_amp_task.HumanoidAMPTask):
info['task_obs_size'] = self.env.task.get_task_obs_size()
else:
info['task_obs_size'] = 0
if self.use_global_obs:
info['state_space'] = self.env.state_space
print(info['action_space'], info['observation_space'], info['state_space'])
else:
print(info['action_space'], info['observation_space'])
return info
vecenv.register('RLGPU', lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs))
env_configurations.register('rlgpu', {'env_creator': lambda **kwargs: create_rlgpu_env(**kwargs), 'vecenv_type': 'RLGPU'})
def build_alg_runner(algo_observer):
runner = Runner(algo_observer)
runner.player_factory.register_builder('amp_discrete', lambda **kwargs: amp_players.AMPPlayerDiscrete(**kwargs))
runner.algo_factory.register_builder('amp', lambda **kwargs: amp_agent.AMPAgent(**kwargs))
runner.player_factory.register_builder('amp', lambda **kwargs: amp_players.AMPPlayerContinuous(**kwargs))
runner.model_builder.model_factory.register_builder('amp', lambda network, **kwargs: amp_models.ModelAMPContinuous(network))
runner.model_builder.network_factory.register_builder('amp', lambda **kwargs: amp_network_builder.AMPBuilder())
runner.model_builder.network_factory.register_builder('amp_mcp', lambda **kwargs: amp_network_mcp_builder.AMPMCPBuilder())
runner.model_builder.network_factory.register_builder('amp_pnn', lambda **kwargs: amp_network_pnn_builder.AMPPNNBuilder())
runner.algo_factory.register_builder('im_amp', lambda **kwargs: im_amp.IMAmpAgent(**kwargs))
runner.player_factory.register_builder('im_amp', lambda **kwargs: im_amp_players.IMAMPPlayerContinuous(**kwargs))
return runner
def main():
global args
global cfg
global cfg_train
set_np_formatting()
args = get_args()
cfg_env_name = args.cfg_env.split("/")[-1].split(".")[0]
args.logdir = args.network_path
cfg, cfg_train, logdir = load_cfg(args) | flags.debug, flags.follow, flags.fixed, flags.divide_group, flags.no_collision_check, flags.fixed_path, flags.real_path, flags.small_terrain, flags.show_traj, flags.server_mode, flags.slow, flags.real_traj, flags.im_eval, flags.no_virtual_display, flags.render_o3d = \ | 6 | 2023-10-15 19:05:47+00:00 | 8k |
uni-medical/SAM-Med3D | segment_anything/modeling/sam3D.py | [
{
"identifier": "ImageEncoderViT3D",
"path": "segment_anything/modeling/image_encoder3D.py",
"snippet": "class ImageEncoderViT3D(nn.Module):\n def __init__(\n self,\n img_size: int = 256,\n patch_size: int = 16,\n in_chans: int = 1,\n embed_dim: int = 768,\n depth: int = 12,\n num_heads: int = 12,\n mlp_ratio: float = 4.0,\n out_chans: int = 256,\n qkv_bias: bool = True,\n norm_layer: Type[nn.Module] = nn.LayerNorm,\n act_layer: Type[nn.Module] = nn.GELU,\n use_abs_pos: bool = True,\n use_rel_pos: bool = False,\n rel_pos_zero_init: bool = True,\n window_size: int = 0,\n global_attn_indexes: Tuple[int, ...] = (),\n ) -> None:\n \"\"\"\n Args:\n img_size (int): Input image size.\n patch_size (int): Patch size.\n in_chans (int): Number of input image channels.\n embed_dim (int): Patch embedding dimension.\n depth (int): Depth of ViT.\n num_heads (int): Number of attention heads in each ViT block.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool): If True, add a learnable bias to query, key, value.\n norm_layer (nn.Module): Normalization layer.\n act_layer (nn.Module): Activation layer.\n use_abs_pos (bool): If True, use absolute positional embeddings.\n use_rel_pos (bool): If True, add relative positional embeddings to the attention map.\n rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.\n window_size (int): Window size for window attention blocks.\n global_attn_indexes (list): Indexes for blocks using global attention.\n \"\"\"\n super().__init__()\n self.img_size = img_size\n\n self.patch_embed = PatchEmbed3D(\n kernel_size=(patch_size, patch_size, patch_size),\n stride=(patch_size, patch_size, patch_size),\n in_chans=in_chans,\n embed_dim=embed_dim,\n )\n\n self.pos_embed: Optional[nn.Parameter] = None\n if use_abs_pos:\n # Initialize absolute positional embedding with pretrain image size.\n self.pos_embed = nn.Parameter(\n torch.zeros(1, img_size // patch_size, img_size // patch_size, img_size // patch_size, embed_dim)\n )\n\n self.blocks = nn.ModuleList()\n for i in range(depth):\n block = Block3D(\n dim=embed_dim,\n num_heads=num_heads,\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias,\n norm_layer=norm_layer,\n act_layer=act_layer,\n use_rel_pos=use_rel_pos,\n rel_pos_zero_init=rel_pos_zero_init,\n window_size=window_size if i not in global_attn_indexes else 0,\n input_size=(img_size // patch_size, img_size // patch_size, img_size // patch_size),\n )\n self.blocks.append(block)\n\n self.neck = nn.Sequential(\n nn.Conv3d(\n embed_dim,\n out_chans,\n kernel_size=1,\n bias=False,\n ),\n # nn.LayerNorm(out_chans),\n LayerNorm3d(out_chans),\n nn.Conv3d(\n out_chans,\n out_chans,\n kernel_size=3,\n padding=1,\n bias=False,\n ),\n LayerNorm3d(out_chans),\n # nn.LayerNorm(out_chans),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n # input_size = [1,1,256,256,256]\n # import IPython; IPython.embed()\n x = self.patch_embed(x)\n # x = [1,16,16,16,768]\n # import pdb; pdb.set_trace()\n if self.pos_embed is not None:\n x = x + self.pos_embed\n\n for blk in self.blocks:\n x = blk(x)\n # x = [1,16,16,16,768]\n x = self.neck(x.permute(0, 4, 1, 2, 3))\n\n # output_size = [1,256,16,16,16]\n return x"
},
{
"identifier": "MaskDecoder3D",
"path": "segment_anything/modeling/mask_decoder3D.py",
"snippet": "class MaskDecoder3D(nn.Module):\n def __init__(\n self,\n *,\n transformer_dim: int,\n # transformer: nn.Module ,\n num_multimask_outputs: int = 3,\n activation: Type[nn.Module] = nn.GELU,\n iou_head_depth: int = 3,\n iou_head_hidden_dim: int = 256,\n ) -> None:\n \"\"\"\n Predicts masks given an image and prompt embeddings, using a\n transformer architecture.\n\n Arguments:\n transformer_dim (int): the channel dimension of the transformer\n transformer (nn.Module): the transformer used to predict masks\n num_multimask_outputs (int): the number of masks to predict\n when disambiguating masks\n activation (nn.Module): the type of activation to use when\n upscaling masks\n iou_head_depth (int): the depth of the MLP used to predict\n mask quality\n iou_head_hidden_dim (int): the hidden dimension of the MLP\n used to predict mask quality\n \"\"\"\n super().__init__()\n self.transformer_dim = transformer_dim\n # self.transformer = transformer\n self.transformer = TwoWayTransformer3D(\n depth=2,\n embedding_dim=self.transformer_dim,\n mlp_dim=2048,\n num_heads=8,\n )\n\n self.num_multimask_outputs = num_multimask_outputs\n\n self.iou_token = nn.Embedding(1, transformer_dim)\n self.num_mask_tokens = num_multimask_outputs + 1\n self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)\n\n self.output_upscaling = nn.Sequential(\n nn.ConvTranspose3d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),\n LayerNorm3d(transformer_dim // 4),\n activation(),\n nn.ConvTranspose3d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),\n activation(),\n )\n self.output_hypernetworks_mlps = nn.ModuleList(\n [\n MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)\n for i in range(self.num_mask_tokens)\n ]\n )\n\n self.iou_prediction_head = MLP(\n transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth\n )\n\n def forward(\n self,\n image_embeddings: torch.Tensor,\n image_pe: torch.Tensor,\n sparse_prompt_embeddings: torch.Tensor,\n dense_prompt_embeddings: torch.Tensor,\n multimask_output: bool,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks given image and prompt embeddings.\n\n Arguments:\n image_embeddings (torch.Tensor): the embeddings from the image encoder\n image_pe (torch.Tensor): positional encoding with the shape of image_embeddings\n sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes\n dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs\n multimask_output (bool): Whether to return multiple masks or a single\n mask.\n\n Returns:\n torch.Tensor: batched predicted masks\n torch.Tensor: batched predictions of mask quality\n \"\"\"\n masks, iou_pred = self.predict_masks(\n image_embeddings=image_embeddings,\n image_pe=image_pe,\n sparse_prompt_embeddings=sparse_prompt_embeddings,\n dense_prompt_embeddings=dense_prompt_embeddings,\n )\n\n # Select the correct mask or masks for output\n if multimask_output:\n mask_slice = slice(1, None)\n else:\n mask_slice = slice(0, 1)\n masks = masks[:, mask_slice, :, :]\n iou_pred = iou_pred[:, mask_slice]\n\n # Prepare output\n return masks, iou_pred\n\n def predict_masks(\n self,\n image_embeddings: torch.Tensor,\n image_pe: torch.Tensor,\n sparse_prompt_embeddings: torch.Tensor,\n dense_prompt_embeddings: torch.Tensor,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Predicts masks. See 'forward' for more details.\"\"\"\n # Concatenate output tokens\n output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)\n output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)\n tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)\n\n # Expand per-image data in batch direction to be per-mask\n if image_embeddings.shape[0] != tokens.shape[0]:\n src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)\n else:\n src = image_embeddings\n src = src + dense_prompt_embeddings\n if image_pe.shape[0] != tokens.shape[0]:\n pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)\n else:\n pos_src = image_pe\n b, c, x, y, z = src.shape\n\n # Run the transformer\n # import IPython; IPython.embed()\n hs, src = self.transformer(src, pos_src, tokens)\n iou_token_out = hs[:, 0, :]\n mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]\n\n # Upscale mask embeddings and predict masks using the mask tokens\n src = src.transpose(1, 2).view(b, c, x, y, z)\n upscaled_embedding = self.output_upscaling(src)\n hyper_in_list: List[torch.Tensor] = []\n for i in range(self.num_mask_tokens):\n hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))\n hyper_in = torch.stack(hyper_in_list, dim=1)\n b, c, x, y, z = upscaled_embedding.shape\n masks = (hyper_in @ upscaled_embedding.view(b, c, x * y * z)).view(b, -1, x, y, z)\n\n # Generate mask quality predictions\n iou_pred = self.iou_prediction_head(iou_token_out)\n\n return masks, iou_pred"
},
{
"identifier": "PromptEncoder3D",
"path": "segment_anything/modeling/prompt_encoder3D.py",
"snippet": "class PromptEncoder3D(nn.Module):\n def __init__(\n self,\n embed_dim: int,\n image_embedding_size: Tuple[int, int, int],\n input_image_size: Tuple[int, int, int],\n mask_in_chans: int,\n activation: Type[nn.Module] = nn.GELU,\n ) -> None:\n \"\"\"\n Encodes prompts for input to SAM's mask decoder.\n\n Arguments:\n embed_dim (int): The prompts' embedding dimension\n image_embedding_size (tuple(int, int)): The spatial size of the\n image embedding, as (H, W).\n input_image_size (int): The padded size of the image as input\n to the image encoder, as (H, W).\n mask_in_chans (int): The number of hidden channels used for\n encoding input masks.\n activation (nn.Module): The activation to use when encoding\n input masks.\n \"\"\"\n super().__init__()\n self.embed_dim = embed_dim\n self.input_image_size = input_image_size\n self.image_embedding_size = image_embedding_size\n self.pe_layer = PositionEmbeddingRandom3D(embed_dim // 3)\n\n self.num_point_embeddings: int = 2 # pos/neg point\n point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]\n self.point_embeddings = nn.ModuleList(point_embeddings)\n self.not_a_point_embed = nn.Embedding(1, embed_dim)\n\n self.mask_input_size = (image_embedding_size[0], image_embedding_size[1], image_embedding_size[2])\n self.mask_downscaling = nn.Sequential(\n nn.Conv3d(1, mask_in_chans // 4, kernel_size=2, stride=2),\n LayerNorm3d(mask_in_chans // 4),\n activation(),\n nn.Conv3d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),\n LayerNorm3d(mask_in_chans),\n activation(),\n nn.Conv3d(mask_in_chans, embed_dim, kernel_size=1),\n )\n self.no_mask_embed = nn.Embedding(1, embed_dim)\n\n def get_dense_pe(self) -> torch.Tensor:\n \"\"\"\n Returns the positional encoding used to encode point prompts,\n applied to a dense set of points the shape of the image encoding.\n\n Returns:\n torch.Tensor: Positional encoding with shape\n 1x(embed_dim)x(embedding_h)x(embedding_w)\n \"\"\"\n return self.pe_layer(self.image_embedding_size).unsqueeze(0) # 1xXxYxZ\n\n def _embed_points(\n self,\n points: torch.Tensor,\n labels: torch.Tensor,\n pad: bool,\n ) -> torch.Tensor:\n \"\"\"Embeds point prompts.\"\"\"\n points = points + 0.5 # Shift to center of pixel\n if pad:\n padding_point = torch.zeros((points.shape[0], 1, 3), device=points.device)\n padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)\n points = torch.cat([points, padding_point], dim=1)\n labels = torch.cat([labels, padding_label], dim=1)\n point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)\n point_embedding[labels == -1] = 0.0\n point_embedding[labels == -1] += self.not_a_point_embed.weight\n point_embedding[labels == 0] += self.point_embeddings[0].weight\n point_embedding[labels == 1] += self.point_embeddings[1].weight\n return point_embedding\n\n def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:\n \"\"\"Embeds box prompts.\"\"\"\n boxes = boxes + 0.5 # Shift to center of pixel\n coords = boxes.reshape(-1, 2, 2)\n corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)\n corner_embedding[:, 0, :] += self.point_embeddings[2].weight\n corner_embedding[:, 1, :] += self.point_embeddings[3].weight\n return corner_embedding\n\n def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:\n \"\"\"Embeds mask inputs.\"\"\"\n mask_embedding = self.mask_downscaling(masks)\n return mask_embedding\n\n def _get_batch_size(\n self,\n points: Optional[Tuple[torch.Tensor, torch.Tensor]],\n boxes: Optional[torch.Tensor],\n masks: Optional[torch.Tensor],\n ) -> int:\n \"\"\"\n Gets the batch size of the output given the batch size of the input prompts.\n \"\"\"\n if points is not None:\n return points[0].shape[0]\n elif boxes is not None:\n return boxes.shape[0]\n elif masks is not None:\n return masks.shape[0]\n else:\n return 1\n\n def _get_device(self) -> torch.device:\n return self.point_embeddings[0].weight.device\n\n def forward(\n self,\n points: Optional[Tuple[torch.Tensor, torch.Tensor]],\n boxes: Optional[torch.Tensor],\n masks: Optional[torch.Tensor],\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Embeds different types of prompts, returning both sparse and dense\n embeddings.\n\n Arguments:\n points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates\n and labels to embed.\n boxes (torch.Tensor or none): boxes to embed\n masks (torch.Tensor or none): masks to embed\n\n Returns:\n torch.Tensor: sparse embeddings for the points and boxes, with shape\n BxNx(embed_dim), where N is determined by the number of input points\n and boxes.\n torch.Tensor: dense embeddings for the masks, in the shape\n Bx(embed_dim)x(embed_H)x(embed_W)\n \"\"\"\n bs = self._get_batch_size(points, boxes, masks)\n sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())\n if points is not None:\n coords, labels = points\n point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))\n sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)\n if boxes is not None:\n box_embeddings = self._embed_boxes(boxes)\n sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)\n\n if masks is not None:\n dense_embeddings = self._embed_masks(masks)\n else:\n dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1, 1).expand(\n bs, -1, self.image_embedding_size[0], self.image_embedding_size[1], self.image_embedding_size[2]\n )\n\n return sparse_embeddings, dense_embeddings"
}
] | import torch
from torch import nn
from torch.nn import functional as F
from typing import Any, Dict, List, Tuple
from .image_encoder3D import ImageEncoderViT3D
from .mask_decoder3D import MaskDecoder3D
from .prompt_encoder3D import PromptEncoder3D | 4,342 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class Sam3D(nn.Module):
mask_threshold: float = 0.0
image_format: str = "L"
def __init__(
self,
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class Sam3D(nn.Module):
mask_threshold: float = 0.0
image_format: str = "L"
def __init__(
self, | image_encoder: ImageEncoderViT3D, | 0 | 2023-10-23 15:41:07+00:00 | 8k |
MolecularAI/REINVENT4 | reinvent/runmodes/create_model/libinvent.py | [
{
"identifier": "DecoratorModel",
"path": "reinvent/models/libinvent/models/model.py",
"snippet": "class DecoratorModel:\n _model_type = \"Libinvent\"\n _version = 1\n\n def __init__(\n self,\n vocabulary,\n decorator,\n max_sequence_length=256,\n mode=ModelModeEnum.TRAINING,\n device=torch.device(\"cpu\"),\n ):\n \"\"\"\n Implements the likelihood and scaffold_decorating functions of the decorator model.\n :param vocabulary: A DecoratorVocabulary instance with the vocabularies of both the encoder and decoder.\n :param decorator: An decorator network instance.\n :param max_sequence_length: Maximium number of tokens allowed to sample.\n :param mode: Mode in which the model should be initialized.\n :return:\n \"\"\"\n\n self.vocabulary = vocabulary\n self.max_sequence_length = max_sequence_length\n\n self._model_modes = ModelModeEnum()\n self.network = decorator\n self.network.to(device)\n self.device = device\n self.set_mode(mode)\n\n self._nll_loss = tnn.NLLLoss(reduction=\"none\", ignore_index=0)\n\n @classmethod\n def load_from_file(cls, file_path: str, mode: str, device: torch.device):\n \"\"\"\n Loads a model from a single file\n :param file_path: Path to the saved model.\n :return: An instance of the RNN.\n \"\"\"\n\n save_dict = torch.load(file_path, map_location=device)\n return cls.create_from_dict(save_dict, mode, device)\n\n @classmethod\n def create_from_dict(cls, save_dict: dict, mode: str, device: torch.device):\n model_type = save_dict.get(\"model_type\")\n\n if model_type and model_type != cls._model_type:\n raise RuntimeError(f\"Wrong type: {model_type} but expected {cls._model_type}\")\n\n decorator = Decorator(**save_dict[\"decorator\"][\"params\"])\n decorator.load_state_dict(save_dict[\"decorator\"][\"state\"])\n\n model = cls(decorator=decorator, mode=mode, device=device, **save_dict[\"model\"])\n\n return model\n\n def get_save_dict(self):\n \"\"\"Return the layout of the save dictionary\"\"\"\n\n save_dict = dict(\n model_type=self._model_type,\n version=self._version,\n model=dict(\n vocabulary=self.vocabulary,\n max_sequence_length=self.max_sequence_length,\n ),\n decorator=dict(\n params=self.network.get_params(),\n state=self.network.state_dict(),\n ),\n )\n\n return save_dict\n\n def save(self, path):\n \"\"\"\n Saves the model to a file.\n :param path: Path to the file which the model will be saved to.\n \"\"\"\n\n save_dict = self.get_save_dict()\n\n torch.save(save_dict, path)\n\n save_to_file = save # alias for backwards compatibility\n\n def set_mode(self, mode):\n \"\"\"\n Changes the mode of the RNN to training or eval.\n :param mode: Mode to change to (training, eval)\n :return: The model instance.\n \"\"\"\n if mode == self._model_modes.INFERENCE:\n self.network.eval()\n else:\n self.network.train()\n return self\n\n def likelihood(\n self,\n scaffold_seqs,\n scaffold_seq_lengths,\n decoration_seqs,\n decoration_seq_lengths,\n ):\n \"\"\"\n Retrieves the likelihood of a scaffold and its respective decorations.\n :param scaffold_seqs: (batch, seq) A batch of padded scaffold sequences.\n :param scaffold_seq_lengths: The length of the scaffold sequences (for packing purposes).\n :param decoration_seqs: (batch, seq) A batch of decorator sequences.\n :param decoration_seq_lengths: The length of the decorator sequences (for packing purposes).\n :return: (batch) Log likelihood for each item in the batch.\n \"\"\"\n\n # NOTE: the decoration_seq_lengths have a - 1 to prevent the end token to be forward-passed.\n logits = self.network(\n scaffold_seqs,\n scaffold_seq_lengths,\n decoration_seqs,\n decoration_seq_lengths - 1,\n ) # (batch, seq - 1, voc)\n log_probs = logits.log_softmax(dim=2).transpose(1, 2) # (batch, voc, seq - 1)\n return self._nll_loss(log_probs, decoration_seqs[:, 1:]).sum(dim=1) # (batch)\n\n @torch.no_grad()\n def sample_decorations(\n self, scaffold_seqs, scaffold_seq_lengths\n ) -> Tuple[List[str], List[str], List[float]]:\n \"\"\"\n Samples as many decorations as scaffolds in the tensor.\n :param scaffold_seqs: a tensor with the scaffolds to sample already encoded and padded.\n :param scaffold_seq_lengths: a tensor with the length of the scaffolds.\n :return: a generator with (scaffold_smi, decoration_smi, nll) triplets.\n \"\"\"\n batch_size = scaffold_seqs.size(0)\n\n input_vector = torch.full(\n (batch_size, 1),\n self.vocabulary.decoration_vocabulary[\"^\"],\n dtype=torch.long,\n ) # (batch, 1)\n\n seq_lengths = torch.ones(batch_size) # (batch)\n encoder_padded_seqs, hidden_states = self.network.forward_encoder(\n scaffold_seqs, scaffold_seq_lengths\n )\n\n nlls = torch.zeros(batch_size)\n not_finished = torch.ones(batch_size, 1, dtype=torch.long)\n sequences = []\n\n for _ in range(self.max_sequence_length - 1):\n logits, hidden_states, _ = self.network.forward_decoder(\n input_vector, seq_lengths, encoder_padded_seqs, hidden_states\n ) # (batch, 1, voc)\n\n probs = logits.softmax(dim=2).squeeze() # (batch, voc)\n log_probs = logits.log_softmax(dim=2).squeeze() # (batch, voc)\n input_vector = torch.multinomial(probs, 1) * not_finished # (batch, 1)\n sequences.append(input_vector)\n nlls += self._nll_loss(log_probs, input_vector.squeeze())\n not_finished = (input_vector > 1).type(torch.long) # 0 is padding, 1 is end token\n\n if not_finished.sum() == 0:\n break\n\n decoration_smiles = [\n self.vocabulary.decode_decoration(seq)\n for seq in torch.cat(sequences, 1).data.cpu().numpy()\n ]\n\n scaffold_smiles = [\n self.vocabulary.decode_scaffold(seq) for seq in scaffold_seqs.data.cpu().numpy()\n ]\n\n return scaffold_smiles, decoration_smiles, nlls.data.cpu().numpy()\n\n def get_network_parameters(self):\n return self.network.parameters()"
},
{
"identifier": "DecoratorVocabulary",
"path": "reinvent/models/libinvent/models/vocabulary.py",
"snippet": "class DecoratorVocabulary:\n \"\"\"\n Encapsulation of the two vocabularies needed for the decorator.\n \"\"\"\n\n def __init__(self, scaffold_vocabulary, scaffold_tokenizer, decoration_vocabulary, decoration_tokenizer):\n self.scaffold_vocabulary = scaffold_vocabulary\n self.scaffold_tokenizer = scaffold_tokenizer\n self.decoration_vocabulary = decoration_vocabulary\n self.decoration_tokenizer = decoration_tokenizer\n\n def len_scaffold(self):\n \"\"\"\n Returns the length of the scaffold vocabulary.\n \"\"\"\n return len(self.scaffold_vocabulary)\n\n def len_decoration(self):\n \"\"\"\n Returns the length of the decoration vocabulary.\n \"\"\"\n return len(self.decoration_vocabulary)\n\n def len(self):\n \"\"\"\n Returns the lenght of both vocabularies in a tuple.\n :return: A tuple with (len(scaff_voc), len(dec_voc)).\n \"\"\"\n return (len(self.scaffold_vocabulary), len(self.decoration_vocabulary))\n\n def encode_scaffold(self, smiles):\n \"\"\"\n Encodes a scaffold SMILES.\n :param smiles: Scaffold SMILES to encode.\n :return : An one-hot-encoded vector with the scaffold information.\n \"\"\"\n return self.scaffold_vocabulary.encode(self.scaffold_tokenizer.tokenize(smiles))\n\n def decode_scaffold(self, encoded_scaffold):\n \"\"\"\n Decodes the scaffold.\n :param encoded_scaffold: A one-hot encoded version of the scaffold.\n :return : A SMILES of the scaffold.\n \"\"\"\n return self.scaffold_tokenizer.untokenize(self.scaffold_vocabulary.decode(encoded_scaffold))\n\n def encode_decoration(self, smiles):\n \"\"\"\n Encodes a decoration SMILES.\n :param smiles: Decoration SMILES to encode.\n :return : An one-hot-encoded vector with the fragment information.\n \"\"\"\n return self.decoration_vocabulary.encode(self.decoration_tokenizer.tokenize(smiles))\n\n def decode_decoration(self, encoded_decoration):\n \"\"\"\n Decodes the decorations for a scaffold.\n :param encoded_decorations: A one-hot encoded version of the decoration.\n :return : A list with SMILES of all the fragments.\n \"\"\"\n return self.decoration_tokenizer.untokenize(self.decoration_vocabulary.decode(encoded_decoration))\n\n @classmethod\n def from_lists(cls, scaffold_list, decoration_list):\n \"\"\"\n Creates the vocabularies from lists.\n :param scaffold_list: A list with scaffolds.\n :param decoration_list: A list with decorations.\n :return : A DecoratorVocabulary instance\n \"\"\"\n scaffold_tokenizer = SMILESTokenizer()\n scaffold_vocabulary = create_vocabulary(scaffold_list, scaffold_tokenizer)\n\n decoration_tokenizer = SMILESTokenizer()\n decoration_vocabulary = create_vocabulary(decoration_list, decoration_tokenizer)\n\n return cls(scaffold_vocabulary, scaffold_tokenizer, decoration_vocabulary, decoration_tokenizer)"
},
{
"identifier": "Decorator",
"path": "reinvent/models/libinvent/models/decorator.py",
"snippet": "class Decorator(tnn.Module):\n \"\"\"\n An encoder-decoder that decorates scaffolds.\n \"\"\"\n\n def __init__(self, encoder_params, decoder_params):\n super(Decorator, self).__init__()\n\n self._encoder = Encoder(**encoder_params)\n self._decoder = Decoder(**decoder_params)\n\n def forward(\n self, encoder_seqs, encoder_seq_lengths, decoder_seqs, decoder_seq_lengths\n ): # pylint: disable=arguments-differ\n \"\"\"\n Performs the forward pass.\n :param encoder_seqs: A tensor with the output sequences (batch, seq_d, dim).\n :param encoder_seq_lengths: A list with the length of each input sequence.\n :param decoder_seqs: A tensor with the encoded input scaffold sequences (batch, seq_e, dim).\n :param decoder_seq_lengths: The lengths of the decoder sequences.\n :return : The output logits as a tensor (batch, seq_d, dim).\n \"\"\"\n encoder_padded_seqs, hidden_states = self.forward_encoder(encoder_seqs, encoder_seq_lengths)\n logits, _, _ = self.forward_decoder(\n decoder_seqs, decoder_seq_lengths, encoder_padded_seqs, hidden_states\n )\n return logits\n\n def forward_encoder(self, padded_seqs, seq_lengths):\n \"\"\"\n Does a forward pass only of the encoder.\n :param padded_seqs: The data to feed the encoder.\n :param seq_lengths: The length of each sequence in the batch.\n :return : Returns a tuple with (encoded_seqs, hidden_states)\n \"\"\"\n return self._encoder(padded_seqs, seq_lengths)\n\n def forward_decoder(self, padded_seqs, seq_lengths, encoder_padded_seqs, hidden_states):\n \"\"\"\n Does a forward pass only of the decoder.\n :param hidden_states: The hidden states from the encoder.\n :param padded_seqs: The data to feed to the decoder.\n :param seq_lengths: The length of each sequence in the batch.\n :return : Returns the logits and the hidden state for each element of the sequence passed.\n \"\"\"\n return self._decoder(padded_seqs, seq_lengths, encoder_padded_seqs, hidden_states)\n\n def get_params(self):\n \"\"\"\n Obtains the params for the network.\n :return : A dict with the params.\n \"\"\"\n return {\n \"encoder_params\": self._encoder.get_params(),\n \"decoder_params\": self._decoder.get_params(),\n }"
},
{
"identifier": "FileReader",
"path": "reinvent/chemistry/file_reader.py",
"snippet": "class FileReader:\n def __init__(self, configuration: List[FilterConfiguration], logger):\n self._conversions = Conversions()\n self._standardizer = RDKitStandardizer(configuration, logger)\n\n def read_library_design_data_file(\n self, file_path, ignore_invalid=True, num=-1, num_fields=0\n ) -> str:\n \"\"\"\n Reads a library design data file.\n :param num_fields: Number columns from the beginning to be loaded.\n :param file_path: Path to a SMILES file.\n :param ignore_invalid: Ignores invalid lines (empty lines)\n :param num: Parse up to num rows.\n :return: An iterator with the rows.\n \"\"\"\n\n with self._open_file(file_path, \"rt\") as csv_file:\n for i, row in enumerate(csv_file):\n if i == num:\n break\n splitted_row = row.rstrip().replace(\",\", \" \").replace(\"\\t\", \" \").split()\n if splitted_row:\n if num_fields > 0:\n splitted_row = splitted_row[0:num_fields]\n yield splitted_row\n elif not ignore_invalid:\n yield None\n\n def _open_file(self, path, mode=\"r\", with_gzip=False):\n \"\"\"\n Opens a file depending on whether it has or not gzip.\n :param path: Path where the file is located.\n :param mode: Mode to open the file.\n :param with_gzip: Open as a gzip file anyway.\n \"\"\"\n open_func = open\n if path.endswith(\".gz\") or with_gzip:\n open_func = gzip.open\n return open_func(path, mode)\n\n def read_delimited_file(\n self, file_path, ignore_invalid=True, num=-1, standardize=False, randomize=False\n ):\n \"\"\"\n Reads a file with SMILES strings in the first column.\n :param randomize: Standardizes smiles.\n :param standardize: Randomizes smiles.\n :param file_path: Path to a SMILES file.\n :param ignore_invalid: Ignores invalid lines (empty lines)\n :param num: Parse up to num rows.\n :return: An iterator with the rows.\n \"\"\"\n actions = []\n if standardize:\n actions.append(self._standardizer.apply_filter)\n if randomize:\n actions.append(self._conversions.randomize_smiles)\n\n with open(file_path, \"r\") as csv_file:\n for i, row in enumerate(csv_file):\n if i == num:\n break\n splitted_row = row.rstrip().replace(\",\", \" \").replace(\"\\t\", \" \").split()\n smiles = splitted_row[0]\n for action in actions:\n if smiles:\n smiles = action(smiles)\n if smiles:\n yield smiles\n elif not ignore_invalid:\n yield None"
}
] | from reinvent.models.libinvent.models.model import DecoratorModel
from reinvent.models.libinvent.models.vocabulary import DecoratorVocabulary
from reinvent.models.libinvent.models.decorator import Decorator
from reinvent.chemistry.file_reader import FileReader
import sys | 3,824 | """Create a Lbinvent model from a list of SMILES strings"""
from __future__ import annotations
def create_model(
num_layers: int,
layer_size: int,
dropout: float,
max_sequence_length: int,
input_smiles_path: str,
output_model_path: str,
):
"""Create a Lbinvent model from scratch
Learn the vocabulary from SMILES.
:returns: a new Libinvent model
"""
reader = FileReader([], None)
# build vocabulary
scaffolds, decorators = zip(
*reader.read_library_design_data_file(input_smiles_path, num_fields=2)
)
| """Create a Lbinvent model from a list of SMILES strings"""
from __future__ import annotations
def create_model(
num_layers: int,
layer_size: int,
dropout: float,
max_sequence_length: int,
input_smiles_path: str,
output_model_path: str,
):
"""Create a Lbinvent model from scratch
Learn the vocabulary from SMILES.
:returns: a new Libinvent model
"""
reader = FileReader([], None)
# build vocabulary
scaffolds, decorators = zip(
*reader.read_library_design_data_file(input_smiles_path, num_fields=2)
)
| vocabulary = DecoratorVocabulary.from_lists(scaffolds, decorators) | 1 | 2023-10-20 06:43:16+00:00 | 8k |
lion-agi/lionagi | lionagi/core/sessions/sessions.py | [
{
"identifier": "Tool",
"path": "lionagi/schema/base_tool.py",
"snippet": "class Tool(BaseNode):\n # name: str = None\n func: Any\n content: Any = None\n parser: Any = None\n schema_: dict\n\n @field_serializer('func')\n def serialize_func(self, func):\n return func.__name__"
},
{
"identifier": "DataLogger",
"path": "lionagi/schema/data_logger.py",
"snippet": "class DataLogger:\n \"\"\"\n A class for logging data entries and exporting them as CSV files.\n\n This class provides functionality to log data entries in a deque and \n supports exporting the logged data to a CSV file. The DataLogger can \n be configured to use a specific directory for saving files.\n\n Attributes:\n dir (Optional[str]): \n The default directory where CSV files will be saved.\n log (deque): \n A deque object that stores the logged data entries.\n\n Methods:\n __call__:\n Adds an entry to the log.\n to_csv:\n Exports the logged data to a CSV file and clears the log.\n set_dir:\n Sets the default directory for saving CSV files.\n \"\"\" \n\n def __init__(self, dir= None, log: list = None) -> None:\n \"\"\"\n Initializes the DataLogger with an optional directory and initial log.\n\n Parameters:\n dir (Optional[str]): The directory where CSV files will be saved. Defaults to None.\n\n log (Optional[List]): An initial list of log entries. Defaults to an empty list.\n \"\"\" \n self.dir = dir\n self.log = deque(log) if log else deque()\n\n def __call__(self, entry):\n \"\"\"\n Adds a new entry to the log.\n\n Parameters:\n entry: The data entry to be added to the log.\n \"\"\" \n self.log.append(entry)\n\n def to_csv(self, filename: str, dir: Optional[str] = None, verbose: bool = True, \n timestamp: bool = True, dir_exist_ok: bool = True, file_exist_ok: bool = False) -> None:\n \"\"\"\n Exports the logged data to a CSV file and optionally clears the log.\n\n Parameters:\n filename (str): The name of the CSV file.\n\n dir (Optional[str]): The directory to save the file. Defaults to the instance's dir attribute.\n\n verbose (bool): If True, prints a message upon completion. Defaults to True.\n\n timestamp (bool): If True, appends a timestamp to the filename. Defaults to True.\n\n dir_exist_ok (bool): If True, will not raise an error if the directory already exists. Defaults to True.\n\n file_exist_ok (bool): If True, overwrites the file if it exists. Defaults to False.\n\n Side Effects:\n Clears the log after saving the CSV file.\n\n Prints a message indicating the save location and number of logs saved if verbose is True.\n \"\"\" \n dir = dir or self.dir\n filepath = create_path(\n dir=dir, filename=filename, timestamp=timestamp, dir_exist_ok=dir_exist_ok)\n to_csv(list(self.log), filepath, file_exist_ok=file_exist_ok)\n n_logs = len(list(self.log))\n self.log = deque()\n if verbose:\n print(f\"{n_logs} logs saved to {filepath}\")\n \n def set_dir(self, dir: str) -> None:\n \"\"\"\n Sets the default directory for saving CSV files.\n\n Parameters:\n dir (str): The directory to be set as the default for saving files.\n \"\"\"\n self.dir = dir"
},
{
"identifier": "lcall",
"path": "lionagi/utils/call_util.py",
"snippet": "def lcall(\n input_: Any, func_: Callable, flatten: bool = False, \n dropna: bool = False, **kwargs\n ) -> List[Any]:\n \"\"\"\n Applies a function to each element of `input`, after converting it to a list.\n\n This function converts the `input` to a list, with options to flatten structures \n and lists, and then applies a given `func` to each element of the list.\n\n Parameters:\n input (Any): The input to be converted to a list and processed.\n\n func (Callable): The function to apply to each element of the list.\n\n flatten_dict (bool, optional): If True, flattens dictionaries in the input. Defaults to False.\n\n flat (bool, optional): If True, flattens nested lists in the input. Defaults to False.\n\n dropna (bool, optional): If True, drops None values during flattening. Defaults to True.\n\n Returns:\n List[Any]: A list containing the results of applying the `func` to each element.\n\n Raises:\n ValueError: If the `func` cannot be applied to the `input`.\n\n Example:\n >>> def square(x):\n ... return x * x\n >>> l_call([1, 2, 3], square)\n [1, 4, 9]\n \"\"\"\n try:\n lst = to_list(input_=input_, flatten=flatten, dropna=dropna)\n return [func_(i, **kwargs) for i in lst]\n except Exception as e:\n raise ValueError(f\"Given function cannot be applied to the input. Error: {e}\")"
},
{
"identifier": "alcall",
"path": "lionagi/utils/call_util.py",
"snippet": "async def alcall(\n input_: Any, func_: Callable, flatten: bool = False, dropna: bool = True, **kwargs\n ) -> List[Any]:\n \"\"\"\n Asynchronously applies a function to each element of `input`, after converting it to a list.\n\n This function converts the `input` to a list, with options to flatten \n dictionaries and lists, and then applies a given asynchronous `func` to \n each element of the list asynchronously.\n\n Parameters:\n input (Any): The input to be converted to a list and processed.\n\n func (Callable): The asynchronous function to apply to each element of the list.\n\n flatten_dict (bool, optional): If True, flattens dictionaries in the input. Defaults to False.\n\n flat (bool, optional): If True, flattens nested lists in the input. Defaults to False.\n\n dropna (bool, optional): If True, drops None values during flattening. Defaults to True.\n\n Returns:\n List[Any]: A list containing the results of applying the `func` to each element.\n\n Raises:\n ValueError: If the `func` cannot be applied to the `input`.\n\n Example:\n >>> async def async_square(x):\n ... return x * x\n >>> asyncio.run(al_call([1, 2, 3], async_square))\n [1, 4, 9]\n \"\"\"\n try:\n lst = to_list(input_=input_, flatten=flatten, dropna=dropna)\n tasks = [func_(i, **kwargs) for i in lst]\n return await asyncio.gather(*tasks)\n except Exception as e:\n raise ValueError(f\"Given function cannot be applied to the input. Error: {e}\")"
},
{
"identifier": "OpenAIService",
"path": "lionagi/services/oai.py",
"snippet": "class OpenAIService(BaseAPIService):\n\n base_url = \"https://api.openai.com/v1/\"\n\n def __init__(\n self,\n api_key: str = None,\n token_encoding_name: str = \"cl100k_base\",\n max_attempts: int = 3,\n max_requests_per_minute: int = 500,\n max_tokens_per_minute: int = 150_000,\n ratelimiter = BaseAPIRateLimiter,\n status_tracker = None,\n queue = None,\n ):\n super().__init__(\n api_key = api_key or getenv(\"OPENAI_API_KEY\"),\n status_tracker = status_tracker,\n queue = queue,\n ratelimiter=ratelimiter,\n max_requests_per_minute=max_requests_per_minute, \n max_tokens_per_minute=max_tokens_per_minute),\n self.token_encoding_name=token_encoding_name\n self.max_attempts = max_attempts\n\n async def serve(self, payload, endpoint_=\"chat/completions\", method=\"post\"):\n return await self._serve(payload=payload, endpoint_=endpoint_, method=method)"
},
{
"identifier": "ChatCompletion",
"path": "lionagi/endpoints/chatcompletion.py",
"snippet": "class ChatCompletion(BaseEndpoint):\n \"\"\"\n Represents an endpoint for chat completions in an API.\n\n This class is designed to handle the creation of payloads for chat completion requests. The 'endpoint' attribute specifies the API endpoint for chat completions.\n\n Attributes:\n endpoint (str): The API endpoint for chat completions.\n \"\"\"\n endpoint: str = \"chat/completions\"\n\n @classmethod\n def create_payload(scls, messages, llmconfig, schema, **kwargs):\n \"\"\"\n Creates a payload for a chat completion request using provided messages, configuration, and schema.\n\n This method constructs a payload dictionary that includes required and optional parameters \n as specified in the schema. Required parameters are extracted from 'llmconfig' and 'kwargs', \n while optional parameters are included only if they are truthy and not equal to the string \"none\".\n\n Parameters:\n messages (list): A list of message objects to include in the payload.\n llmconfig (dict): A dictionary containing configuration settings for the large language model.\n schema (dict): A dictionary defining required and optional keys for the payload.\n The 'required' key should map to a list of required parameter names.\n The 'optional' key should map to a list of optional parameter names.\n **kwargs: Additional keyword arguments that can override or supplement 'llmconfig'.\n\n Returns:\n dict: A dictionary representing the payload for the chat completion request.\n\n Example:\n payload = ChatCompletion.create_payload(\n messages=[{\"text\": \"Hello, world!\"}],\n llmconfig={\"max_tokens\": 100},\n schema={\"required\": [\"max_tokens\"], \"optional\": [\"temperature\"]}\n )\n \"\"\"\n config = {**llmconfig, **kwargs}\n payload = {\"messages\": messages}\n for key in schema['required']:\n payload.update({key: config[key]})\n\n for key in schema['optional']:\n if bool(config[key]) is True and str(config[key]).lower() != \"none\":\n payload.update({key: config[key]})\n return payload\n \n # def process_response(self, session, payload, completion):\n # ..."
},
{
"identifier": "ToolManager",
"path": "lionagi/objs/tool_manager.py",
"snippet": "class ToolManager(BaseNode):\n registry: Dict = {}\n\n def name_existed(self, name: str):\n return True if name in self.registry.keys() else False\n \n def _register_tool(self, tool): #,update=False, new=False, prefix=None, postfix=None):\n \n # if self._name_existed(tool.name):\n # if update and new:\n # raise ValueError(f\"Cannot both update and create new registry for existing function {tool.name} at the same time\")\n\n # if len(name) > len(tool.func.__name__):\n # if new and not postfix:\n # try:\n # idx = str_to_num(name[-3:], int)\n # if idx > 0:\n # postfix = idx + 1\n # except:\n # pass\n\n # name = f\"{prefix or ''}{name}{postfix}\" if new else tool.func.__name__\n\n if not isinstance(tool, Tool):\n raise TypeError('Please register a Tool object.')\n name = tool.schema_['function']['name']\n self.registry.update({name: tool})\n \n async def invoke(self, func_call):\n name, kwargs = func_call\n if self.name_existed(name):\n tool = self.registry[name]\n func = tool.func\n parser = tool.parser\n try:\n if asyncio.iscoroutinefunction(func):\n return await parser(func(**kwargs)) if parser else func(**kwargs)\n else:\n return parser(func(**kwargs)) if parser else func(**kwargs)\n except Exception as e:\n raise ValueError(f\"Error when invoking function {name} with arguments {kwargs} with error message {e}\")\n else: \n raise ValueError(f\"Function {name} is not registered.\")\n \n @staticmethod\n def get_function_call(response):\n \"\"\"\n Extract function name and arguments from a response JSON.\n\n Parameters:\n response (dict): The JSON response containing function information.\n\n Returns:\n Tuple[str, dict]: The function name and its arguments.\n \"\"\"\n try:\n func = response['function'][5:]\n args = json.loads(response['arguments'])\n return (func, args)\n except:\n try:\n func = response['recipient_name'].split('.')[-1]\n args = response['parameters']\n return (func, args)\n except:\n raise ValueError('response is not a valid function call')\n \n def register_tools(self, tools): #, update=False, new=False, prefix=None, postfix=None ):\n lcall(tools, self._register_tool) #, update=update, new=new, prefix=prefix, postfix=postfix)\n\n def to_tool_schema_list(self):\n schema_list = []\n for tool in self.registry.values():\n schema_list.append(tool.schema_)\n return schema_list"
},
{
"identifier": "oai_schema",
"path": "lionagi/configs/oai_configs.py",
"snippet": ""
},
{
"identifier": "Conversation",
"path": "lionagi/core/conversations/conversation.py",
"snippet": "class Conversation(BaseNode):\n \"\"\"\n A conversation that handles messages and responses.\n\n Attributes:\n response_counts (int): A counter for the number of responses in the conversation.\n messages (List[Message]): A list of message objects in the conversation.\n msgr (Messenger): An instance of Messenger to create message objects.\n responses (List[Response]): A list of response objects in the conversation.\n \"\"\" \n\n response_counts : int = 0\n messages: List[Message] = []\n msgr : Any = Messenger()\n responses: List[Response] = []\n\n def initiate_conversation(\n self, system=None, instruction=None, \n context=None, name=None\n ):\n \"\"\"\n Initiates a new conversation, erase previous messages and responses.\n\n Parameters:\n system (Any, optional): System information to include in the initial message. Defaults to None.\n instruction (Any, optional): Instruction details to include in the conversation. Defaults to None.\n context (Any, optional): Contextual information relevant to the conversation. Defaults to None.\n name (str, optional): The name associated with the conversation. Defaults to None.\n\n Returns:\n None\n \"\"\"\n self.messages, self.responses = [], []\n self.add_messages(system=system)\n self.add_messages(instruction=instruction, context=context, name=name)\n\n # modify the message adding to accomodate tools\n def add_messages(\n self, system=None, instruction=None, \n context=None, response=None, name=None\n ):\n \"\"\"\n Adds a new message object to the conversation messages list based on the provided parameters.\n\n Parameters:\n system (Any, optional): System information to include in the message. Defaults to None.\n instruction (Any, optional): Instruction details to include in the message. Defaults to None.\n context (Any, optional): Contextual information relevant to the message. Defaults to None.\n response (Any, optional): Response details to include in the message. Defaults to None.\n name (str, optional): The name associated with the message. Defaults to None.\n\n Returns:\n None\n \"\"\"\n msg = self.msgr.create_message(\n system=system, instruction=instruction, \n context=context, response=response, name=name\n )\n self.messages.append(msg)\n\n def change_system(self, system):\n \"\"\"\n Changes the system information of the first message in the conversation.\n\n Parameters:\n system (Any): The new system information to be set.\n\n Returns:\n None\n \"\"\"\n self.messages[0] = self.msgr.create_message(system=system)\n\n\n def keep_last_n_exchanges(self, n: int):\n \"\"\"\n Keeps only the last n exchanges in the conversation, where an exchange starts with a user message. This function trims the conversation to retain only the specified number of the most recent exchanges. \n An exchange is defined as a sequence of messages starting with a user message. \n The first message in the conversation, typically a system message, is always retained.\n\n Parameters:\n n (int): The number of exchanges to keep in the conversation.\n\n Returns:\n None: The method modifies the conversation in place and does not return a value.\n\n Raises:\n ValueError: If n is not a positive integer.\n\n Note:\n This function assumes the first message in the conversation is a system message and each user message \n marks the beginning of a new exchange.\n \"\"\"\n response_indices = [\n index for index, message in enumerate(self.messages[1:]) \n if message.role == \"user\"\n ]\n if len(response_indices) >= n:\n first_index_to_keep = response_indices[-n] + 1\n self.messages = [self.system] + self.messages[first_index_to_keep:]"
}
] | import json
from typing import Any
from dotenv import load_dotenv
from lionagi.schema import DataLogger, Tool
from lionagi.utils import lcall, alcall
from lionagi.services import OpenAIService
from lionagi.endpoints import ChatCompletion
from lionagi.objs.tool_manager import ToolManager
from lionagi.configs.oai_configs import oai_schema
from lionagi.core.conversations.conversation import Conversation | 4,794 |
load_dotenv()
OAIService = OpenAIService()
class Session:
def __init__(
self, system, dir=None, llmconfig=oai_schema['chat']['config'],
service=OAIService
):
self.conversation = Conversation()
self.system = system
self.llmconfig = llmconfig
self.logger_ = DataLogger(dir=dir)
self.service = service
self.tool_manager = ToolManager()
def set_dir(self, dir):
self.logger_.dir = dir
def set_system(self, system):
self.conversation.change_system(system)
def set_llmconfig(self, llmconfig):
self.llmconfig = llmconfig
def set_service(self, service):
self.service = service
async def _output(self, invoke=True, out=True):
if invoke:
try:
# func, args = self.tool_manager._get_function_call(self.conversation.responses[-1]['content'])
# outs = await self.tool_manager.invoke(func, args)
# self.conversation.add_messages(response=outs)
tool_uses = json.loads(self.conversation.responses[-1].message_content)
if 'function_list' in tool_uses.keys():
func_calls = lcall(tool_uses['function_list'], self.tool_manager.get_function_call)
else:
func_calls = lcall(tool_uses['tool_uses'], self.tool_manager.get_function_call)
outs = await alcall(func_calls, self.tool_manager.invoke)
for out, f in zip(outs, func_calls):
response = {"function": f[0], "arguments": f[1], "output": out}
self.conversation.add_messages(response=response)
except:
pass
if out:
return self.conversation.responses[-1].message_content
def _is_invoked(self):
content = self.conversation.messages[-1].message_content
try:
if json.loads(content).keys() >= {'function', 'arguments', 'output'}:
return True
except:
return False
def register_tools(self, tools): #, update=False, new=False, prefix=None, postfix=None):
if not isinstance(tools, list):
tools=[tools]
self.tool_manager.register_tools(tools=tools) #, update=update, new=new, prefix=prefix, postfix=postfix)
# tools_schema = lcall(tools, lambda tool: tool.to_dict()['schema_'])
# if self.llmconfig['tools'] is None:
# self.llmconfig['tools'] = tools_schema
# else:
# self.llmconfig['tools'] += tools_schema
def _tool_parser(self, **kwargs):
# 1. single schema: dict
# 2. tool: Tool
# 3. name: str
# 4. list: 3 types of lists
def tool_check(tool):
if isinstance(tool, dict):
return tool
|
load_dotenv()
OAIService = OpenAIService()
class Session:
def __init__(
self, system, dir=None, llmconfig=oai_schema['chat']['config'],
service=OAIService
):
self.conversation = Conversation()
self.system = system
self.llmconfig = llmconfig
self.logger_ = DataLogger(dir=dir)
self.service = service
self.tool_manager = ToolManager()
def set_dir(self, dir):
self.logger_.dir = dir
def set_system(self, system):
self.conversation.change_system(system)
def set_llmconfig(self, llmconfig):
self.llmconfig = llmconfig
def set_service(self, service):
self.service = service
async def _output(self, invoke=True, out=True):
if invoke:
try:
# func, args = self.tool_manager._get_function_call(self.conversation.responses[-1]['content'])
# outs = await self.tool_manager.invoke(func, args)
# self.conversation.add_messages(response=outs)
tool_uses = json.loads(self.conversation.responses[-1].message_content)
if 'function_list' in tool_uses.keys():
func_calls = lcall(tool_uses['function_list'], self.tool_manager.get_function_call)
else:
func_calls = lcall(tool_uses['tool_uses'], self.tool_manager.get_function_call)
outs = await alcall(func_calls, self.tool_manager.invoke)
for out, f in zip(outs, func_calls):
response = {"function": f[0], "arguments": f[1], "output": out}
self.conversation.add_messages(response=response)
except:
pass
if out:
return self.conversation.responses[-1].message_content
def _is_invoked(self):
content = self.conversation.messages[-1].message_content
try:
if json.loads(content).keys() >= {'function', 'arguments', 'output'}:
return True
except:
return False
def register_tools(self, tools): #, update=False, new=False, prefix=None, postfix=None):
if not isinstance(tools, list):
tools=[tools]
self.tool_manager.register_tools(tools=tools) #, update=update, new=new, prefix=prefix, postfix=postfix)
# tools_schema = lcall(tools, lambda tool: tool.to_dict()['schema_'])
# if self.llmconfig['tools'] is None:
# self.llmconfig['tools'] = tools_schema
# else:
# self.llmconfig['tools'] += tools_schema
def _tool_parser(self, **kwargs):
# 1. single schema: dict
# 2. tool: Tool
# 3. name: str
# 4. list: 3 types of lists
def tool_check(tool):
if isinstance(tool, dict):
return tool | elif isinstance(tool, Tool): | 0 | 2023-10-17 03:10:02+00:00 | 8k |
ziqipang/LM4VisualEncoding | pointcloud_classification/models/Point_BERT.py | [
{
"identifier": "Group",
"path": "pointcloud_classification/models/dvae.py",
"snippet": "class Group(nn.Module):\n def __init__(self, num_group, group_size):\n super().__init__()\n self.num_group = num_group\n self.group_size = group_size\n # self.knn = KNN(k=self.group_size, transpose_mode=True)\n\n def forward(self, xyz):\n '''\n input: B N 3\n ---------------------------\n output: B G M 3\n center : B G 3\n '''\n batch_size, num_points, _ = xyz.shape\n # fps the centers out\n center = misc.fps(xyz, self.num_group) # B G 3\n # knn to get the neighborhood\n # _, idx = self.knn(xyz, center) # B G M\n idx = knn_point(self.group_size, xyz, center) # B G M\n assert idx.size(1) == self.num_group\n assert idx.size(2) == self.group_size\n idx_base = torch.arange(0, batch_size, device=xyz.device).view(-1, 1, 1) * num_points\n idx = idx + idx_base\n idx = idx.view(-1)\n neighborhood = xyz.view(batch_size * num_points, -1)[idx, :]\n neighborhood = neighborhood.view(batch_size, self.num_group, self.group_size, 3).contiguous()\n # normalize\n neighborhood = neighborhood - center.unsqueeze(2)\n return neighborhood, center"
},
{
"identifier": "DiscreteVAE",
"path": "pointcloud_classification/models/dvae.py",
"snippet": "class DiscreteVAE(nn.Module):\n def __init__(self, config, **kwargs):\n super().__init__()\n self.group_size = config.group_size\n self.num_group = config.num_group\n self.encoder_dims = config.encoder_dims\n self.tokens_dims = config.tokens_dims\n\n self.decoder_dims = config.decoder_dims\n self.num_tokens = config.num_tokens\n\n \n self.group_divider = Group(num_group = self.num_group, group_size = self.group_size)\n self.encoder = Encoder(encoder_channel = self.encoder_dims)\n self.dgcnn_1 = DGCNN(encoder_channel = self.encoder_dims, output_channel = self.num_tokens)\n self.codebook = nn.Parameter(torch.randn(self.num_tokens, self.tokens_dims))\n\n self.dgcnn_2 = DGCNN(encoder_channel = self.tokens_dims, output_channel = self.decoder_dims)\n self.decoder = Decoder(encoder_channel = self.decoder_dims, num_fine = self.group_size)\n self.build_loss_func()\n\n \n \n def build_loss_func(self):\n self.loss_func_cdl1 = ChamferDistanceL1().cuda()\n self.loss_func_cdl2 = ChamferDistanceL2().cuda()\n self.loss_func_emd = emd().cuda()\n\n def recon_loss(self, ret, gt):\n whole_coarse, whole_fine, coarse, fine, group_gt, _ = ret\n\n bs, g, _, _ = coarse.shape\n\n coarse = coarse.reshape(bs*g, -1, 3).contiguous()\n fine = fine.reshape(bs*g, -1, 3).contiguous()\n group_gt = group_gt.reshape(bs*g, -1, 3).contiguous()\n\n loss_coarse_block = self.loss_func_cdl1(coarse, group_gt)\n loss_fine_block = self.loss_func_cdl1(fine, group_gt)\n\n loss_recon = loss_coarse_block + loss_fine_block\n\n return loss_recon\n\n def get_loss(self, ret, gt):\n\n # reconstruction loss\n loss_recon = self.recon_loss(ret, gt)\n # kl divergence\n logits = ret[-1] # B G N\n softmax = F.softmax(logits, dim=-1)\n mean_softmax = softmax.mean(dim=1)\n log_qy = torch.log(mean_softmax)\n log_uniform = torch.log(torch.tensor([1. / self.num_tokens], device = gt.device))\n loss_klv = F.kl_div(log_qy, log_uniform.expand(log_qy.size(0), log_qy.size(1)), None, None, 'batchmean', log_target = True)\n\n return loss_recon, loss_klv\n\n\n def forward(self, inp, temperature = 1., hard = False, **kwargs):\n neighborhood, center = self.group_divider(inp)\n logits = self.encoder(neighborhood) # B G C\n logits = self.dgcnn_1(logits, center) # B G N\n soft_one_hot = F.gumbel_softmax(logits, tau = temperature, dim = 2, hard = hard) # B G N\n sampled = torch.einsum('b g n, n c -> b g c', soft_one_hot, self.codebook) # B G C\n feature = self.dgcnn_2(sampled, center)\n coarse, fine = self.decoder(feature)\n\n\n with torch.no_grad():\n whole_fine = (fine + center.unsqueeze(2)).reshape(inp.size(0), -1, 3)\n whole_coarse = (coarse + center.unsqueeze(2)).reshape(inp.size(0), -1, 3)\n\n assert fine.size(2) == self.group_size\n ret = (whole_coarse, whole_fine, coarse, fine, neighborhood, logits)\n return ret"
},
{
"identifier": "Encoder",
"path": "pointcloud_classification/models/dvae.py",
"snippet": "class Encoder(nn.Module):\n def __init__(self, encoder_channel):\n super().__init__()\n self.encoder_channel = encoder_channel\n self.first_conv = nn.Sequential(\n nn.Conv1d(3, 128, 1),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 256, 1)\n )\n self.second_conv = nn.Sequential(\n nn.Conv1d(512, 512, 1),\n nn.BatchNorm1d(512),\n nn.ReLU(inplace=True),\n nn.Conv1d(512, self.encoder_channel, 1)\n )\n def forward(self, point_groups):\n '''\n point_groups : B G N 3\n -----------------\n feature_global : B G C\n '''\n bs, g, n , _ = point_groups.shape\n point_groups = point_groups.reshape(bs * g, n, 3)\n # encoder\n feature = self.first_conv(point_groups.transpose(2,1)) # BG 256 n\n feature_global = torch.max(feature,dim=2,keepdim=True)[0] # BG 256 1\n feature = torch.cat([feature_global.expand(-1,-1,n), feature], dim=1)# BG 512 n\n feature = self.second_conv(feature) # BG 1024 n\n feature_global = torch.max(feature, dim=2, keepdim=False)[0] # BG 1024\n return feature_global.reshape(bs, g, self.encoder_channel)"
},
{
"identifier": "LLaMATransformer",
"path": "pointcloud_classification/models/llama.py",
"snippet": "class LLaMATransformer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.n_layers = config['n_layers']\n self.first_layer = config['first_layer']\n self.layers = torch.nn.ModuleList()\n print(f'LLaMA Transformer with {self.n_layers} layers, first layer {self.first_layer}')\n for layer_id in range(self.first_layer, self.n_layers):\n self.layers.append(TransformerBlock(layer_id, config))\n\n self.norm = RMSNorm(config['dim'], eps=config['norm_eps'])\n self.prepare_inputs_for_generation = None\n \n # @torch.inference_mode()\n def forward(self, h):\n for layer in self.layers:\n h = layer(h)\n h = self.norm(h)\n return h.float()\n\n def custom_load_state_dict(self, checkpoint, tail=False, strict=False):\n # self.load_state_dict(checkpoint, strict=strict)\n # load the final layers\n if tail:\n for i in range(self.first_layer, self.n_layers):\n layer_checkpoint_keys = [k for k in checkpoint.keys() if f'layers.{i}.' in k]\n layer_checkpoint_keys = [k.replace(f'layers.{i}.', '') for k in layer_checkpoint_keys]\n layer_checkpoint = {k: checkpoint[f'layers.{i}.{k}'] for k in layer_checkpoint_keys}\n layer_checkpoint.pop('attention.inner_attention.rope.freqs')\n self.layers[i - self.first_layer].load_state_dict(layer_checkpoint, strict=True)\n return\n\n @torch.inference_mode()\n def forward_llama(self, tokens: torch.Tensor, start_pos: int):\n _bsz, seqlen = tokens.shape\n h = self.tok_embeddings(tokens)\n self.freqs_cis = self.freqs_cis.to(h.device)\n freqs_cis = self.freqs_cis[start_pos : start_pos + seqlen]\n\n mask = None\n if seqlen > 1:\n mask = torch.full((1, 1, seqlen, seqlen), float(\"-inf\"), device=tokens.device)\n mask = torch.triu(mask, diagonal=start_pos + 1).type_as(h)\n\n if self.adapter:\n adapter_index = 0\n adapter = self.adapter_query.weight.reshape(-1, self.adapter_len, 4096).unsqueeze(1)\n for layer in self.layers:\n if not self.use_adapter:\n h = layer(h, start_pos, freqs_cis, mask)\n else:\n h = layer(h, start_pos, freqs_cis, mask, adapter[adapter_index])\n adapter_index += 1\n h = self.norm(h)\n output = self.output(h[:, -1, :]) # only compute last logits\n return output.float()"
},
{
"identifier": "MODELS",
"path": "pointcloud_classification/models/build.py",
"snippet": "MODELS = registry.Registry('models')"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
import timm
import numpy as np
import random
from pathlib import Path
from timm.models.layers import DropPath, trunc_normal_
from .dvae import Group
from .dvae import DiscreteVAE, Encoder
from .llama import LLaMATransformer
from .build import MODELS
from utils import misc
from utils.checkpoint import get_missing_parameters_message, get_unexpected_parameters_message
from utils.logger import * | 3,770 | x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class TransformerEncoder(nn.Module):
""" Transformer Encoder without hierarchical structure
"""
def __init__(self, embed_dim=768, depth=4, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.):
super().__init__()
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path = drop_path_rate[i] if isinstance(drop_path_rate, list) else drop_path_rate
)
for i in range(depth)])
def forward(self, x, pos):
for _, block in enumerate(self.blocks):
x = block(x + pos)
return x
@MODELS.register_module()
class PointTransformer(nn.Module):
def __init__(self, config, **kwargs):
super().__init__()
self.config = config
self.trans_dim = config.trans_dim
self.depth = config.depth
self.drop_path_rate = config.drop_path_rate
self.cls_dim = config.cls_dim
self.num_heads = config.num_heads
self.group_size = config.group_size
self.num_group = config.num_group
# grouper
self.group_divider = Group(num_group = self.num_group, group_size = self.group_size)
# define the encoder
self.encoder_dims = config.encoder_dims
self.encoder = Encoder(encoder_channel = self.encoder_dims)
# bridge encoder and transformer
self.reduce_dim = nn.Linear(self.encoder_dims, self.trans_dim)
self.cls_token = nn.Parameter(torch.zeros(1, 1, self.trans_dim))
self.cls_pos = nn.Parameter(torch.randn(1, 1, self.trans_dim))
self.pos_embed = nn.Sequential(
nn.Linear(3, 128),
nn.GELU(),
nn.Linear(128, self.trans_dim)
)
dpr = [x.item() for x in torch.linspace(0, self.drop_path_rate, self.depth)]
self.blocks = TransformerEncoder(
embed_dim = self.trans_dim,
depth = self.depth,
drop_path_rate = dpr,
num_heads = self.num_heads
)
self.norm = nn.LayerNorm(self.trans_dim)
self.cls_head_finetune = nn.Sequential(
nn.Linear(self.trans_dim * 2, 256),
nn.ReLU(inplace=True),
nn.Dropout(0.5),
nn.Linear(256, self.cls_dim)
)
if hasattr(config, 'use_llama') and config.use_llama:
llama_default_config = dict(config.llama_cfg)
|
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class TransformerEncoder(nn.Module):
""" Transformer Encoder without hierarchical structure
"""
def __init__(self, embed_dim=768, depth=4, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.):
super().__init__()
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path = drop_path_rate[i] if isinstance(drop_path_rate, list) else drop_path_rate
)
for i in range(depth)])
def forward(self, x, pos):
for _, block in enumerate(self.blocks):
x = block(x + pos)
return x
@MODELS.register_module()
class PointTransformer(nn.Module):
def __init__(self, config, **kwargs):
super().__init__()
self.config = config
self.trans_dim = config.trans_dim
self.depth = config.depth
self.drop_path_rate = config.drop_path_rate
self.cls_dim = config.cls_dim
self.num_heads = config.num_heads
self.group_size = config.group_size
self.num_group = config.num_group
# grouper
self.group_divider = Group(num_group = self.num_group, group_size = self.group_size)
# define the encoder
self.encoder_dims = config.encoder_dims
self.encoder = Encoder(encoder_channel = self.encoder_dims)
# bridge encoder and transformer
self.reduce_dim = nn.Linear(self.encoder_dims, self.trans_dim)
self.cls_token = nn.Parameter(torch.zeros(1, 1, self.trans_dim))
self.cls_pos = nn.Parameter(torch.randn(1, 1, self.trans_dim))
self.pos_embed = nn.Sequential(
nn.Linear(3, 128),
nn.GELU(),
nn.Linear(128, self.trans_dim)
)
dpr = [x.item() for x in torch.linspace(0, self.drop_path_rate, self.depth)]
self.blocks = TransformerEncoder(
embed_dim = self.trans_dim,
depth = self.depth,
drop_path_rate = dpr,
num_heads = self.num_heads
)
self.norm = nn.LayerNorm(self.trans_dim)
self.cls_head_finetune = nn.Sequential(
nn.Linear(self.trans_dim * 2, 256),
nn.ReLU(inplace=True),
nn.Dropout(0.5),
nn.Linear(256, self.cls_dim)
)
if hasattr(config, 'use_llama') and config.use_llama:
llama_default_config = dict(config.llama_cfg) | self.llama = LLaMATransformer(llama_default_config) | 3 | 2023-10-19 15:40:57+00:00 | 8k |
stanford-oval/WikiChat | pipelines/chatbot.py | [
{
"identifier": "DialogueTurn",
"path": "pipelines/dialog_turn.py",
"snippet": "class DialogueTurn:\n def __init__(\n self,\n agent_utterance: str = None,\n user_utterance: str = None,\n pipeline: str = None,\n engine: str = None,\n generate_engine: str = None,\n draft_engine: str = None,\n ):\n self.engine = engine\n self.generate_engine = generate_engine\n self.draft_engine = draft_engine\n self.pipeline = pipeline\n self.wall_time_seconds = (\n 0 # how much time it took to generate this turn, in seconds\n )\n self.agent_utterance = agent_utterance\n self.user_utterance = user_utterance\n\n # retrieve_and_generate pipeline\n self.initial_search_query = None\n self.initial_search_query_time = None\n self.initial_search_results = []\n self.initial_search_result_titles = []\n self.initial_search_bullets = []\n\n # generate_and_correct pipeline\n self.llm_utterance = None\n self.claims = []\n self.verification_retrieval_results = {}\n self.verification_result = {}\n\n # early_combine pipeline\n self.combined_evidences = []\n self.combined_utterance = None\n self.feedback = []\n self.feedback_scores = []\n self.refined_utterance = None\n\n def _summarize_vc_log(self):\n verification_summary = {}\n assert len(self.verification_result) == len(\n self.verification_retrieval_results\n ), \"We need to have retrieved evidence for all claims\"\n for key, value in self.verification_retrieval_results.items():\n claim_idx = int(key)\n v_ret_results = []\n for v in value:\n title, paragraph, score = tuple(v)\n v_ret_results.append(\n {\"title\": title, \"paragraph\": paragraph, \"score\": round(score, 1)}\n )\n verification_summary[self.claims[claim_idx][0]] = OrderedDict(\n {\n \"label\": self.verification_result[claim_idx][\"label\"],\n \"fixed_claim\": self.verification_result[claim_idx][\"fixed_claim\"],\n \"retrieval_results\": v_ret_results,\n }\n )\n return verification_summary\n\n def _summarize_rg_log(self):\n rg_summary = {\n \"initial_search_query\": self.initial_search_query,\n \"initial_search_query_time\": self.initial_search_query_time,\n \"initial_search_bullets\": self.initial_search_bullets,\n \"initial_search_results\": [],\n }\n\n for i in range(len(self.initial_search_results)):\n rg_summary[\"initial_search_results\"].append(\n {\n \"title\": self.initial_search_result_titles[i],\n \"paragraph\": self.initial_search_results[i],\n # 'bullets': self.initial_search_bullets,\n }\n )\n\n return rg_summary\n\n def log(self):\n \"\"\"\n Returns a json object that contains all information inside `self`\n \"\"\"\n # combine fields into a more human-readable field\n verification_summary = self._summarize_vc_log()\n rg_summary = self._summarize_rg_log()\n\n return OrderedDict(\n {\n # retrieve_and_generate pipeline\n \"retrieve_and_generate\": rg_summary,\n # generate_and_correct pipeline\n \"llm_utterance\": self.llm_utterance,\n \"generate_and_correct\": verification_summary,\n # early_combine pipeline\n \"combined_evidences\": self.combined_evidences,\n \"combined_utterance\": self.combined_utterance,\n \"feedback\": self.feedback,\n \"feedback_scores\": self.feedback_scores,\n \"refined_utterance\": self.refined_utterance,\n \"user_utterance\": self.user_utterance,\n \"agent_utterance\": self.agent_utterance,\n \"engine\": self.engine,\n \"generate_engine\": self.generate_engine,\n \"draft_engine\": self.draft_engine,\n \"pipeline\": self.pipeline,\n \"wall_time_seconds\": round(self.wall_time_seconds, 1),\n }\n )\n\n @staticmethod\n def utterance_list_to_dialog_history(utterance_list: List[str]):\n \"\"\"\n The resulting dialog history will not have all the fields correctly initialized, since no information about e.g. search queries is available\n \"\"\"\n dialog_history = []\n assert (\n len(utterance_list) % 2 == 1\n ), \"The first turn is always the user, and the turn to be generated is always the agent, so the number of turns should be odd\"\n for i in range(0, len(utterance_list) - 2, 2):\n dialog_history.append(\n DialogueTurn(\n user_utterance=utterance_list[i],\n agent_utterance=utterance_list[i + 1],\n )\n )\n user_utterance = utterance_list[-1]\n\n return dialog_history, user_utterance\n\n @staticmethod\n def dialog_history_to_utterance_list(dialog_history) -> List[str]:\n \"\"\"\n Convert a list of DialogueTurns to a list of strings\n \"\"\"\n utterance_list = []\n for turn in dialog_history:\n utterance_list.append(turn.user_utterance)\n utterance_list.append(turn.agent_utterance)\n return utterance_list"
},
{
"identifier": "llm_generate",
"path": "llm/llm_generate.py",
"snippet": "def llm_generate(\n template_file: str,\n prompt_parameter_values: Union[dict, List[dict]],\n engine: str,\n max_tokens: int,\n temperature: float,\n stop_tokens,\n top_p: float = 0.9,\n frequency_penalty: float = 0,\n presence_penalty: float = 0,\n postprocess: bool = True,\n filled_prompt=None,\n):\n \"\"\"\n Generates continuations for one or more prompts in parallel\n Inputs:\n prompt_parameter_values: dict or list of dict. If the input is a list, the output will be a list as well\n filled_prompt: gives direct access to the underlying model, without having to load a prompt template from a .prompt file. Used for testing.\n \"\"\"\n if not (\n filled_prompt is None\n and prompt_parameter_values is not None\n and template_file is not None\n ) and not (\n filled_prompt is not None\n and prompt_parameter_values is None\n and template_file is None\n ):\n raise ValueError(\n \"Can only use filled_prompt if template_file and prompt_parameter_values are None\"\n )\n\n # Decide which LLM resource to send this request to.\n # Use hash so that each time this function gets called with the same parameters after a backoff, the request gets sent to the same resource\n potential_llm_resources = [\n resource\n for resource in global_variables.all_llm_endpoints\n if engine in resource[\"engine_map\"]\n ]\n llm_resource = potential_llm_resources[\n hash(\n str(\n (\n template_file,\n prompt_parameter_values,\n engine,\n max_tokens,\n temperature,\n stop_tokens,\n top_p,\n frequency_penalty,\n presence_penalty,\n )\n )\n )\n % len(potential_llm_resources)\n ]\n # uniform load balancing instead of hashing\n # llm_resource = potential_llm_resources[random.randrange(len(potential_llm_resources))]\n\n if llm_resource[\"api_type\"] == \"local\":\n prompt_format = llm_resource[\"prompt_format\"]\n else:\n prompt_format = \"none\"\n\n # convert to a single element list so that the rest of the code only has to deal with a list\n input_was_list = True\n if filled_prompt is None:\n assert prompt_parameter_values is not None\n if not isinstance(prompt_parameter_values, list):\n input_was_list = False\n prompt_parameter_values = [prompt_parameter_values]\n filled_prompt, rendered_blocks = _fill_prompt(\n template_file, prompt_parameter_values, engine, prompt_format\n )\n else:\n if not isinstance(filled_prompt, list):\n input_was_list = False\n filled_prompt = [filled_prompt]\n\n assert isinstance(filled_prompt, list)\n\n # Call LLM to generate outputs\n generation_output = _llm_completion_with_backoff_and_cache(\n original_engine_name=engine,\n **_set_llm_resource_fields(\n llm_resource=llm_resource,\n engine=engine,\n prompt=filled_prompt,\n max_tokens=max_tokens,\n temperature=temperature,\n top_p=top_p,\n frequency_penalty=frequency_penalty,\n presence_penalty=presence_penalty,\n stop=stop_tokens,\n )\n )\n outputs = []\n for choice in generation_output[\"choices\"]:\n if choice[\"text\"]:\n outputs.append(choice[\"text\"])\n\n logger.info(\"LLM output: %s\", json.dumps(outputs, indent=2, ensure_ascii=False))\n\n # calculate and record the cost\n cost_prompt, cost_completion = global_variables._model_name_to_cost(engine)\n total_cost = (\n generation_output[\"usage\"][\"prompt_tokens\"] * cost_prompt\n + generation_output[\"usage\"].get(\"completion_tokens\", 0) * cost_completion\n ) / 1000\n global_variables.add_to_total_cost(total_cost)\n\n # postprocess the generation outputs\n outputs = [o.strip() for o in outputs]\n if postprocess:\n outputs = [_postprocess_generations(o) for o in outputs]\n\n # add to prompt logs if needed\n if global_variables.debug_prompts:\n with global_variables.thread_lock:\n for i, o in enumerate(outputs):\n if template_file in global_variables.prompts_to_skip_for_debugging:\n continue\n global_variables.prompt_logs.append(\n {\n \"template_name\": template_file,\n \"instruction\": rendered_blocks[i][\"short_instruction\"]\n if \"short_instruction\" in rendered_blocks[i]\n else rendered_blocks[i][\"instruction\"],\n \"input\": rendered_blocks[i][\"input\"],\n \"output\": o,\n }\n )\n\n if outputs == []:\n outputs = \"\"\n\n # convert back to a single item\n if len(outputs) == 1 and not input_was_list:\n outputs = outputs[0]\n return outputs"
},
{
"identifier": "ClaimSplitter",
"path": "pipelines/claim_splitter.py",
"snippet": "class ClaimSplitter:\n def __init__(self, prompt_template_file: str):\n self.prompt_template_file = prompt_template_file\n\n def split_claim(\n self,\n dialog_history: List,\n new_user_utterance: str,\n current_agent_utterance: str,\n engine_dict: dict,\n dialog_topic: str = None,\n claims_output=None\n ):\n \"\"\"\n dialog_topic: used for splitting claims of a simulated dialog we want to evaluate\n claims_output: If provided, won't run LLM again and will use this output instead\n \"\"\"\n if not claims_output:\n assert current_agent_utterance is not None, \"current_agent_utterance must be provided when `split_claim` is not fused with `generate`\"\n claims_output = llm_generate(\n template_file=self.prompt_template_file,\n prompt_parameter_values={\n \"dlg\": dialog_history,\n \"new_user_utterance\": new_user_utterance,\n \"current_agent_utterance\": current_agent_utterance,\n \"dialog_topic\": dialog_topic,\n },\n engine=engine_dict[\"default\"],\n max_tokens=300,\n temperature=0,\n stop_tokens=[\"=====\"],\n postprocess=False,\n )\n\n if claims_output.startswith(\"Yes. \"):\n # necessary for some versions of distilled models\n claims_output = claims_output[5:]\n all_claims = self._format_output(claims_output)\n\n return all_claims\n\n def _format_output(self, output):\n lines = output.split(\"\\n\")\n if lines[0].startswith(\"Nothing.\"):\n # no claims detected\n return []\n all_claims = []\n try:\n for c in lines:\n claim = c\n cleaned_claim = claim.split(f\"- \")[-1].strip()\n if cleaned_claim:\n split_term = \" The year of the results is \"\n if split_term not in cleaned_claim:\n split_term = \" The year of the claim is \"\n splitted = cleaned_claim.split(split_term)\n if len(splitted) == 2:\n cleaned_claim, year = splitted\n year = year[1:-2]\n else:\n # sometimes model output may not be well-formatted (e.g. N/A); default to none\n cleaned_claim = splitted[0]\n year = \"none\"\n all_claims.append((cleaned_claim, year))\n except Exception as e:\n logger.error(\"Error while parsing claims in %s: %s\", output, str(e))\n raise e\n\n return all_claims\n\n @staticmethod\n def remove_claims_from_previous_turns(claims: List, object_dlg_history):\n \"\"\"\n Removes claims that are repeated from the last turn. This is often the result of LLM making a mistake while splitting claims.\n But even if it is actually a claim that the chatbot repeats, removing it here is beneficial as it will reduce repetitiveness.\n \"\"\"\n previous_turn_claims = []\n for i in range(len(object_dlg_history)):\n previous_turn_claims.extend([c[0] for c in object_dlg_history[i].claims])\n claims = [c for c in claims if c[0] not in previous_turn_claims]\n\n return claims"
},
{
"identifier": "Refiner",
"path": "pipelines/refiner.py",
"snippet": "class Refiner:\n def __init__(self, prompt, args):\n self.prompt = prompt\n self.temperature = args.temperature\n self.top_p = args.top_p\n\n def set_refinement_fields(\n self,\n object_dlg_history: List[DialogueTurn],\n new_dlg_turn: DialogueTurn,\n engine_dict,\n ):\n prompt_output = llm_generate(\n template_file=self.prompt,\n prompt_parameter_values={\n \"dlg\": object_dlg_history,\n \"new_dlg_turn\": new_dlg_turn,\n },\n engine=engine_dict[\"default\"],\n max_tokens=300,\n temperature=self.temperature,\n top_p=self.top_p,\n stop_tokens=None,\n postprocess=False,\n )\n if self.prompt.endswith(\"refine_w_feedback.prompt\"):\n return Refiner.handle_refinement_with_feedback(new_dlg_turn, prompt_output)\n elif self.prompt.endswith(\"refine.prompt\"):\n return Refiner.handle_refinement_without_feedback(\n new_dlg_turn, prompt_output\n )\n else:\n raise ValueError(\"Unknown refinement prompt.\")\n\n @staticmethod\n def handle_refinement_without_feedback(new_dlg_turn, prompt_output):\n new_dlg_turn.refined_utterance = prompt_output.strip()\n return new_dlg_turn.refined_utterance\n\n @staticmethod\n def handle_refinement_with_feedback(new_dlg_turn, prompt_output: str):\n refine_identifiers = [\n \"Revised response after applying this feedback:\",\n \"Response after applying this feedback:\",\n ]\n for identifier in refine_identifiers:\n if identifier in prompt_output:\n feedback, prompt_output = prompt_output.split(identifier)\n\n (\n new_dlg_turn.feedback,\n new_dlg_turn.feedback_scores,\n ) = Refiner._parse_feedback(feedback)\n if sum(new_dlg_turn.feedback_scores) == 100 * len(\n new_dlg_turn.feedback_scores\n ):\n # skip refinement if it already gets full score\n new_dlg_turn.refined_utterance = new_dlg_turn.agent_utterance\n else:\n new_dlg_turn.refined_utterance = prompt_output.strip()\n return new_dlg_turn.refined_utterance\n\n logger.error(\n \"Skipping refinement due to malformatted Refined response: %s\",\n prompt_output,\n )\n new_dlg_turn.refined_utterance = new_dlg_turn.agent_utterance\n return new_dlg_turn.refined_utterance\n\n @staticmethod\n def _parse_feedback(feedback):\n if \"User:\" in feedback:\n feedback = feedback.split(\"User:\")[0]\n feedback_lines = feedback.strip().split(\"\\n\")\n\n if len(feedback_lines) < 4 or len(feedback_lines) > 5:\n logger.error(\"Feedback malformatted\")\n logger.error(feedback_lines)\n return [], []\n\n scores = (\n []\n ) # Relevant, Informative, Conversational, Non-Redundant, Temporally Correct scores\n for line in feedback_lines:\n score = line.strip().split(\" \")[-1].strip()\n if (\n score == \"N/A\" or \"this criterion is not applicable\" in line\n ): # some models say \"not applicable\" instead of N/A\n score = 100\n else:\n try:\n score = int(score.split(\"/\")[0])\n except:\n logger.error(f\"Feedback line malformatted: {line}\")\n score = 100\n scores.append(score)\n logger.info(\"Feedback scores: %s\", scores)\n return feedback_lines, scores"
},
{
"identifier": "is_everything_verified",
"path": "pipelines/utils.py",
"snippet": "def is_everything_verified(ver_out):\n \"\"\"\n Everything is verified when 1) we have only one claim and it is supported or 2) all claims are supported.\n \"\"\"\n for label_fix in ver_out:\n if label_fix[\"label\"] != \"SUPPORTS\":\n return False\n return True"
},
{
"identifier": "extract_year",
"path": "pipelines/utils.py",
"snippet": "def extract_year(title, passage):\n if title:\n passage = title + \" | \" + passage\n years = []\n year_pattern = r\"\\d{4}\"\n year_duration_pattern = r\"\\b\\d{4}[--–]\\d{2}\\b\"\n year_to_pattern = r\"\\b\\d{4} to \\d{4}\\b\"\n # extract \"1990 to 1998\" before spacy because spacy would split it to 1990 and 1998\n re_year_tos = re.findall(year_to_pattern, passage)\n for re_year_to in re_year_tos:\n re_years = re.findall(year_pattern, re_year_to)\n if len(re_years) != 2:\n continue\n year1, year2 = re_years\n years.extend(list(range(int(year1), int(year2) + 1)))\n passage.replace(re_year_to, \" \")\n\n doc = spacy_nlp(passage)\n dates = [(X.text, X.label_) for X in doc.ents if X.label_ == \"DATE\"]\n for date in dates:\n date = date[0]\n # \"the 2006–07 season\"\n re_year_durations = re.findall(year_duration_pattern, date)\n if re_year_durations:\n for re_year_duration in re_year_durations:\n if \"–\" in re_year_duration:\n year1, year2 = re_year_duration.split(\"–\")\n elif \"-\" in re_year_duration:\n year1, year2 = re_year_duration.split(\"-\")\n else:\n continue\n year2 = year1[:2] + year2\n years.extend([year1, year2])\n continue\n # any 4 digits\n re_years = re.findall(year_pattern, date)\n if re_years:\n years.extend(re_years)\n years = list(sorted(set([int(year) for year in years])))\n return years"
}
] | from concurrent.futures import ThreadPoolExecutor
from typing import List
from .dialog_turn import DialogueTurn
from llm.llm_generate import llm_generate
from .claim_splitter import ClaimSplitter
from .refiner import Refiner
from .utils import is_everything_verified, extract_year
import time
import re
import requests
import logging
import numpy as np | 4,994 |
logger = logging.getLogger(__name__)
class Chatbot:
"""
A stateless chatbot. Stateless means that it does not store the history of the dialog in itself, but requires it as an input
"""
def __init__(self, args) -> None:
# Initialize everything, because we can change the pipeline on the fly using system_parameters
self.claim_splitter = ClaimSplitter(args.claim_prompt_template_file)
self.evi_num = args.evi_num
self.colbert_endpoint = args.colbert_endpoint
self.retrieval_num = args.retrieval_num
self.refiner = Refiner(prompt=args.refinement_prompt, args=args)
self.temperature = args.temperature
self.max_tokens = args.max_tokens
self.top_p = args.top_p
self.presence_penalty = args.presence_penalty
self.frequency_penalty = args.frequency_penalty
self.skip_verification = args.skip_verification
# default parameters, can be overridden:
self.engine = args.engine
self.generate_engine = args.generate_engine
self.draft_engine = args.draft_engine
self.do_refine=args.do_refine
self.fuse_claim_splitting = args.fuse_claim_splitting
def generate_next_turn(
self,
|
logger = logging.getLogger(__name__)
class Chatbot:
"""
A stateless chatbot. Stateless means that it does not store the history of the dialog in itself, but requires it as an input
"""
def __init__(self, args) -> None:
# Initialize everything, because we can change the pipeline on the fly using system_parameters
self.claim_splitter = ClaimSplitter(args.claim_prompt_template_file)
self.evi_num = args.evi_num
self.colbert_endpoint = args.colbert_endpoint
self.retrieval_num = args.retrieval_num
self.refiner = Refiner(prompt=args.refinement_prompt, args=args)
self.temperature = args.temperature
self.max_tokens = args.max_tokens
self.top_p = args.top_p
self.presence_penalty = args.presence_penalty
self.frequency_penalty = args.frequency_penalty
self.skip_verification = args.skip_verification
# default parameters, can be overridden:
self.engine = args.engine
self.generate_engine = args.generate_engine
self.draft_engine = args.draft_engine
self.do_refine=args.do_refine
self.fuse_claim_splitting = args.fuse_claim_splitting
def generate_next_turn(
self, | object_dlg_history: List[DialogueTurn], | 0 | 2023-10-19 18:17:25+00:00 | 8k |
SunOner/yolov8_aimbot | run.py | [
{
"identifier": "Config",
"path": "logic/config_watcher.py",
"snippet": "class Config():\n def __init__(self):\n self.config = configparser.ConfigParser()\n self.Read(verbose=False)\n \n def Read(self, verbose=False):\n self.config.read('./config.ini')\n self.config_Detection_window = self.config['Detection window']\n self.detection_window_width = int(self.config_Detection_window['detection_window_width'])\n self.detection_window_height = int(self.config_Detection_window['detection_window_height'])\n \n self.config_Bettercam_Capture = self.config['Capture Methods']\n self.Bettercam_capture = self.config_Bettercam_Capture.getboolean('Bettercam_capture')\n self.bettercam_capture_fps = int(self.config_Bettercam_Capture['bettercam_capture_fps'])\n self.bettercam_monitor_id = int(self.config_Bettercam_Capture['bettercam_monitor_id'])\n self.bettercam_gpu_id = int(self.config_Bettercam_Capture['bettercam_gpu_id'])\n\n self.config_Obs_capture = self.config['Capture Methods']\n self.Obs_capture = self.config_Obs_capture.getboolean('Obs_capture')\n self.Obs_camera_id = int(self.config_Obs_capture['Obs_camera_id'])\n self.Obs_capture_fps = int(self.config_Obs_capture['Obs_capture_fps'])\n \n self.config_Aim_settings = self.config['Aim settings']\n self.body_y_offset = float(self.config_Aim_settings['body_y_offset'])\n self.hideout_targets = self.config_Aim_settings.getboolean('hideout_targets')\n self.disable_headshot = self.config_Aim_settings.getboolean('disable_headshot')\n \n self.config_Hotkeys_settings = self.config['Hotkeys settings']\n self.hotkey_targeting = str(self.config_Hotkeys_settings['hotkey_targeting'])\n self.hotkey_exit = str(self.config_Hotkeys_settings['hotkey_exit'])\n self.hotkey_pause = str(self.config_Hotkeys_settings['hotkey_pause'])\n self.hotkey_reload_config = str(self.config_Hotkeys_settings['hotkey_reload_config'])\n \n self.config_Mouse_settings = self.config['Mouse settings']\n self.mouse_dpi = float(self.config_Mouse_settings['mouse_dpi'])\n self.mouse_sensitivity = float(self.config_Mouse_settings['mouse_sensitivity'])\n self.mouse_fov = float(self.config_Mouse_settings['mouse_fov'])\n self.mouse_lock_target = self.config_Mouse_settings.getboolean('mouse_lock_target')\n self.mouse_auto_shoot = self.config_Mouse_settings.getboolean('mouse_auto_shoot')\n self.mouse_auto_aim = self.config_Mouse_settings.getboolean('mouse_auto_aim')\n self.mouse_native = self.config_Mouse_settings.getboolean('mouse_native')\n self.mouse_triggerbot = self.config_Mouse_settings.getboolean('mouse_triggerbot')\n self.mouse_move_by_arduino = self.config_Mouse_settings.getboolean('mouse_move_by_arduino')\n self.mouse_shoot_by_arduino = self.config_Mouse_settings.getboolean('mouse_shoot_by_arduino')\n \n self.config_AI_options = self.config['AI options']\n self.AI_model_path = str(self.config_AI_options['AI_model_path'])\n self.AI_image_size = int(self.config_AI_options['AI_image_size'])\n self.AI_conf = float(self.config_AI_options['AI_conf'])\n self.AI_iou = float(self.config_AI_options['AI_iou'])\n self.AI_device = str(self.config_AI_options['AI_device'])\n \n self.config_Overlay_detector = self.config['Overlay detector']\n self.show_overlay_detector = self.config_Overlay_detector.getboolean('show_overlay_detector')\n self.show_overlay_boxes = self.config_Overlay_detector.getboolean('show_overlay_boxes')\n self.show_overlay_line = self.config_Overlay_detector.getboolean('show_overlay_line')\n \n self.config_Debug_window = self.config['Debug window']\n self.show_window = self.config_Debug_window.getboolean('show_window')\n self.show_speed = self.config_Debug_window.getboolean('show_speed')\n self.show_fps = self.config_Debug_window.getboolean('show_fps')\n self.show_boxes = self.config_Debug_window.getboolean('show_boxes')\n self.show_labels = self.config_Debug_window.getboolean('show_labels')\n self.show_conf = self.config_Debug_window.getboolean('show_conf')\n self.show_target_line = self.config_Debug_window.getboolean('show_target_line')\n self.debug_window_always_on_top = self.config_Debug_window.getboolean('debug_window_always_on_top')\n self.debug_window_scale_percent = int(self.config_Debug_window['debug_window_scale_percent'])\n self.debug_window_name = str(self.config_Debug_window['debug_window_name'])\n\n if verbose:\n print('Config reloaded')"
},
{
"identifier": "MouseThread",
"path": "logic/mouse.py",
"snippet": "class MouseThread(threading.Thread):\n def __init__(self):\n super(MouseThread, self).__init__()\n self.queue = queue.Queue(maxsize=1)\n self.daemon = True\n self.dpi = cfg.mouse_dpi\n self.mouse_sensitivity = cfg.mouse_sensitivity\n self.fov = cfg.mouse_fov\n self.screen_width = cfg.detection_window_width\n self.screen_height = cfg.detection_window_height\n self.center_x = self.screen_width / 2\n self.center_y = self.screen_height / 2\n self.start()\n\n def run(self):\n while True:\n data = self.queue.get()\n if data is None:\n pass\n else:\n self.process_data(data)\n \n def process_data(self, data):\n shooting_key = self.get_shooting_key_state()\n target_x, target_y, target_w, target_h = data\n bScope = self.check_target_in_scope(target_x, target_y, target_w, target_h) if cfg.mouse_auto_shoot or cfg.mouse_triggerbot else False\n x, y = self.adjust_mouse_movement(target_x, target_y)\n self.move_mouse(x, y, shooting_key)\n self.shoot(bScope)\n \n def get_shooting_key_state(self):\n if cfg.mouse_lock_target:\n return win32api.GetKeyState(Keyboard.KEY_CODES.get(cfg.hotkey_targeting))\n return win32api.GetAsyncKeyState(Keyboard.KEY_CODES.get(cfg.hotkey_targeting))\n\n def adjust_mouse_movement(self, target_x, target_y):\n offset_x = target_x - self.center_x\n offset_y = target_y - self.center_y\n\n degrees_per_pixel_x = self.fov / self.screen_width\n\n mouse_move_x = offset_x * degrees_per_pixel_x\n\n mouse_dpi_move_x = (mouse_move_x / 360) * (self.dpi * (1 / self.mouse_sensitivity))\n\n mouse_move_y = offset_y * degrees_per_pixel_x\n mouse_dpi_move_y = (mouse_move_y / 360) * (self.dpi * (1 / self.mouse_sensitivity))\n \n return mouse_dpi_move_x, mouse_dpi_move_y\n \n def Update_settings(self):\n self.dpi = cfg.mouse_dpi\n self.mouse_sensitivity = cfg.mouse_sensitivity\n self.fov = cfg.mouse_fov\n self.screen_width = cfg.detection_window_width\n self.screen_height = cfg.detection_window_height\n \n def check_target_in_scope(self, target_x, target_y, target_w, target_h):\n x = cfg.detection_window_width / 2\n y = cfg.detection_window_height / 2\n x1 = (target_x - target_w)\n x2 = (target_x + target_w)\n y1 = (target_y - target_h)\n y2 = (target_y + target_h)\n\n if (x > x1 and x < x2 and y > y1 and y < y2) :\n return True\n else:\n return False\n\n def move_mouse(self, x, y, shooting_key):\n if x == None or y == None:\n pass\n if shooting_key == -32768 or shooting_key == 1 and cfg.mouse_auto_aim == False and cfg.mouse_triggerbot == False or cfg.mouse_auto_aim:\n if cfg.mouse_native == True and x is not None and y is not None and cfg.mouse_move_by_arduino == False: # Native move\n win32api.mouse_event(win32con.MOUSEEVENTF_MOVE, int(x), int(y), 0, 0)\n\n if cfg.mouse_native == False and x is not None and y is not None and cfg.mouse_move_by_arduino == False: # ghub move\n ghub_mouse_xy(int(x), int(y))\n\n if cfg.mouse_move_by_arduino and x is not None and y is not None:\n Arduino.move(int(x), int(y))\n \n def shoot(self, bScope):\n # By GetAsyncKeyState\n if cfg.mouse_auto_shoot == True and cfg.mouse_triggerbot == False:\n if win32api.GetAsyncKeyState(Keyboard.KEY_CODES.get(cfg.hotkey_targeting)) == -32768 and bScope:\n if cfg.mouse_native and cfg.mouse_shoot_by_arduino == False: # native\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)\n if cfg.mouse_native == False and cfg.mouse_shoot_by_arduino == False: #ghub\n ghub_mouse_down()\n if cfg.mouse_shoot_by_arduino: # arduino\n Arduino.press()\n\n if win32api.GetAsyncKeyState(Keyboard.KEY_CODES.get(cfg.hotkey_targeting)) == 0 or bScope == False:\n if cfg.mouse_native and cfg.mouse_shoot_by_arduino == False: # native\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0, 0, 0)\n if cfg.mouse_native == False and cfg.mouse_shoot_by_arduino == False: #ghub\n ghub_mouse_up()\n if cfg.mouse_shoot_by_arduino: # arduino\n Arduino.release()\n \n # By triggerbot\n if cfg.mouse_auto_shoot and cfg.mouse_triggerbot and bScope:\n if cfg.mouse_native and cfg.mouse_shoot_by_arduino == False: # native\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)\n if cfg.mouse_native == False and cfg.mouse_shoot_by_arduino == False: #ghub\n ghub_mouse_down()\n if cfg.mouse_shoot_by_arduino: # arduino\n Arduino.press()\n\n if cfg.mouse_auto_shoot and cfg.mouse_triggerbot and bScope == False:\n if cfg.mouse_native and cfg.mouse_shoot_by_arduino == False: # native\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0, 0, 0)\n if cfg.mouse_native == False and cfg.mouse_shoot_by_arduino == False: #ghub\n ghub_mouse_up()\n if cfg.mouse_shoot_by_arduino: # arduino\n Arduino.release()"
}
] | from logic.config_watcher import Config
from logic.keyboard import *
from logic.capture import *
from logic.mouse import MouseThread
from ultralytics import YOLO
import math
import torch
import cv2
import time
import win32api, win32con, win32gui
import tkinter as tk | 4,481 | def spawn_debug_window():
if cfg.show_window:
print('An open debug window can affect performance.')
cv2.namedWindow(cfg.debug_window_name)
if cfg.debug_window_always_on_top:
debug_window_hwnd = win32gui.FindWindow(None, cfg.debug_window_name)
win32gui.SetWindowPos(debug_window_hwnd, win32con.HWND_TOPMOST, 100, 100, 200, 200, 0)
@torch.no_grad()
def init():
overlay = OverlayWindow() if cfg.show_overlay_detector else None
prev_frame_time, new_frame_time = 0, 0 if cfg.show_window and cfg.show_fps else None
try:
model = YOLO(f'models/{cfg.AI_model_path}', task='detect')
print_startup_messages()
except Exception as e:
print(e)
quit(0)
spawn_debug_window()
cfg_reload_prev_state = 0
shooting_queue = []
screen_center = torch.tensor([frames.screen_x_center, frames.screen_y_center], device='cuda:0')
while True:
cfg_reload_prev_state = process_hotkeys(cfg_reload_prev_state)
image = frames.get_new_frame()
result = perform_detection(model, image)
update_overlay_window(overlay)
if cfg.show_window:
annotated_frame = image
for frame in result:
if cfg.show_window and cfg.show_speed == True:
annotated_frame = speed(annotated_frame, frame.speed['preprocess'], frame.speed['inference'], frame.speed['postprocess'])
if len(frame.boxes):
if app_pause == 0:
boxes_array = frame.boxes.xywh
distances_sq = torch.sum((boxes_array[:, :2] - screen_center) ** 2, dim=1)
classes_np = frame.boxes.cls.cpu().numpy()
shooting_queue = [Target(*box[:4].cpu().numpy(), cls) for box, cls in zip(boxes_array, classes_np)]
if not cfg.disable_headshot:
sort_indices = np.lexsort((distances_sq.cpu().numpy(), classes_np != 7))
else:
class7_indices = torch.where(frame.boxes.cls == 7)[0]
if len(class7_indices) > 0:
class7_distances_sq = distances_sq[class7_indices]
sort_indices_class7 = torch.argsort(class7_distances_sq)
class7_indices = class7_indices[sort_indices_class7]
else:
sort_indices_class7 = torch.tensor([], dtype=torch.int64, device=cfg.AI_device)
other_indices = torch.where(frame.boxes.cls != 7)[0]
other_distances_sq = distances_sq[other_indices]
sort_indices_other = torch.argsort(other_distances_sq)
sort_indices = torch.cat((class7_indices, other_indices[sort_indices_other])).cpu().numpy()
shooting_queue = [shooting_queue[i] for i in sort_indices]
if shooting_queue:
target = shooting_queue[0]
mouse_worker.queue.put((target.x, target.y, target.w, target.h))
if cfg.show_window and cfg.show_target_line:
draw_target_line(annotated_frame=annotated_frame, screen_x_center=cfg.detection_window_width / 2, screen_y_center=cfg.detection_window_height / 2, target_x=target.x, target_y=target.y + cfg.body_y_offset / target.h)
if cfg.show_overlay_detector and cfg.show_overlay_boxes:
x1, y1 = target.x - target.w / 2, target.y - target.h / 2
x2, y2 = target.x + target.w / 2, target.y + target.h / 2
overlay.canvas.create_rectangle(x1.item(), y1.item(), x2.item(), y2.item(), width=2, outline='green')
if cfg.show_overlay_detector and cfg.show_overlay_line:
overlay.canvas.create_line(cfg.detection_window_width / 2, cfg.detection_window_height / 2, target.x, target.y + cfg.body_y_offset / target.h, width=2, fill='red')
shooting_queue.clear()
else: pass
if cfg.show_window and cfg.show_boxes:
draw_helpers(annotated_frame=annotated_frame, boxes=frame.boxes)
else:
mouse_worker.queue.put(None)
if cfg.show_window and cfg.show_fps:
new_frame_time = time.time()
fps = 1/(new_frame_time-prev_frame_time)
prev_frame_time = new_frame_time
cv2.putText(annotated_frame, f'FPS: {str(int(fps))}', (10, 80) if cfg.show_speed else (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 1, cv2.LINE_AA)
if win32api.GetAsyncKeyState(Keyboard.KEY_CODES.get(cfg.hotkey_exit)) & 0xFF:
if cfg.show_window:
cv2.destroyWindow(cfg.debug_window_name)
frames.Quit()
break
if cfg.show_window:
try:
if cfg.debug_window_scale_percent != 100:
height = int(cfg.detection_window_height * cfg.debug_window_scale_percent / 100)
width = int(cfg.detection_window_width * cfg.debug_window_scale_percent / 100)
dim = (width, height)
cv2.resizeWindow(cfg.debug_window_name, dim)
resised = cv2.resize(annotated_frame, dim, cv2.INTER_NEAREST)
cv2.imshow(cfg.debug_window_name, resised)
else:
cv2.imshow(cfg.debug_window_name, annotated_frame)
except: exit(0)
if cfg.show_window and cv2.waitKey(1) & 0xFF == ord('q'):
break
if __name__ == "__main__":
frames = Capture()
| cfg = Config()
if cfg.show_overlay_detector:
class Target:
def __init__(self, x, y, w, h, cls):
self.x = x
self.y = y if cls == 7 else (y - cfg.body_y_offset * h)
self.w = w
self.h = h
self.distance = math.sqrt((x - frames.screen_x_center)**2 + (y - frames.screen_y_center)**2)
self.cls = cls
class OverlayWindow:
def __init__(self):
self.overlay_detector = tk.Tk()
self.overlay_detector.geometry(f'{cfg.detection_window_width}x{cfg.detection_window_height}+{frames.Calculate_screen_offset()[0]}+{frames.Calculate_screen_offset()[1]}')
self.overlay_detector.lift()
self.overlay_detector.wm_attributes("-topmost", True)
self.overlay_detector.wm_attributes("-disabled", True)
self.overlay_detector.wm_attributes("-transparentcolor", "white")
self.overlay_detector.title('new.txt')
self.overlay_detector.overrideredirect(True)
self.canvas = tk.Canvas(self.overlay_detector, bg='white', height=cfg.detection_window_height, width=cfg.detection_window_width)
self.canvas.pack()
def perform_detection(model, image):
clss = [0, 1]
if cfg.hideout_targets:
clss += 5, 6
if cfg.disable_headshot == False:
clss.append(7)
return model.predict(
source=image,
stream=True,
cfg='logic/game.yaml',
imgsz=cfg.AI_image_size,
stream_buffer=False,
visualize=False,
augment=True,
agnostic_nms=False,
save=False,
conf=cfg.AI_conf,
iou=cfg.AI_iou,
device=cfg.AI_device,
half=False,
max_det=10,
vid_stride=False,
classes=clss,
verbose=False,
show_boxes=False,
show_labels=False,
show_conf=False,
show=False)
def print_startup_messages():
print('Aimbot is started. Enjoy!\n'
f'[{cfg.hotkey_targeting}] - Aiming at the target\n'
f'[{cfg.hotkey_exit}] - EXIT\n'
f'[{cfg.hotkey_pause}] - PAUSE AIM\n'
f'[{cfg.hotkey_reload_config}] - Reload config')
def process_hotkeys(cfg_reload_prev_state):
global app_pause
app_pause = win32api.GetKeyState(Keyboard.KEY_CODES[cfg.hotkey_pause])
app_reload_cfg = win32api.GetKeyState(Keyboard.KEY_CODES[cfg.hotkey_reload_config])
if app_reload_cfg != cfg_reload_prev_state:
if app_reload_cfg in (1, 0):
cfg.Read(verbose=True)
frames.reload_capture()
mouse_worker.Update_settings()
cfg_reload_prev_state = app_reload_cfg
return cfg_reload_prev_state
def update_overlay_window(overlay):
if cfg.show_overlay_detector:
overlay.overlay_detector.update()
overlay.canvas.delete("all")
def spawn_debug_window():
if cfg.show_window:
print('An open debug window can affect performance.')
cv2.namedWindow(cfg.debug_window_name)
if cfg.debug_window_always_on_top:
debug_window_hwnd = win32gui.FindWindow(None, cfg.debug_window_name)
win32gui.SetWindowPos(debug_window_hwnd, win32con.HWND_TOPMOST, 100, 100, 200, 200, 0)
@torch.no_grad()
def init():
overlay = OverlayWindow() if cfg.show_overlay_detector else None
prev_frame_time, new_frame_time = 0, 0 if cfg.show_window and cfg.show_fps else None
try:
model = YOLO(f'models/{cfg.AI_model_path}', task='detect')
print_startup_messages()
except Exception as e:
print(e)
quit(0)
spawn_debug_window()
cfg_reload_prev_state = 0
shooting_queue = []
screen_center = torch.tensor([frames.screen_x_center, frames.screen_y_center], device='cuda:0')
while True:
cfg_reload_prev_state = process_hotkeys(cfg_reload_prev_state)
image = frames.get_new_frame()
result = perform_detection(model, image)
update_overlay_window(overlay)
if cfg.show_window:
annotated_frame = image
for frame in result:
if cfg.show_window and cfg.show_speed == True:
annotated_frame = speed(annotated_frame, frame.speed['preprocess'], frame.speed['inference'], frame.speed['postprocess'])
if len(frame.boxes):
if app_pause == 0:
boxes_array = frame.boxes.xywh
distances_sq = torch.sum((boxes_array[:, :2] - screen_center) ** 2, dim=1)
classes_np = frame.boxes.cls.cpu().numpy()
shooting_queue = [Target(*box[:4].cpu().numpy(), cls) for box, cls in zip(boxes_array, classes_np)]
if not cfg.disable_headshot:
sort_indices = np.lexsort((distances_sq.cpu().numpy(), classes_np != 7))
else:
class7_indices = torch.where(frame.boxes.cls == 7)[0]
if len(class7_indices) > 0:
class7_distances_sq = distances_sq[class7_indices]
sort_indices_class7 = torch.argsort(class7_distances_sq)
class7_indices = class7_indices[sort_indices_class7]
else:
sort_indices_class7 = torch.tensor([], dtype=torch.int64, device=cfg.AI_device)
other_indices = torch.where(frame.boxes.cls != 7)[0]
other_distances_sq = distances_sq[other_indices]
sort_indices_other = torch.argsort(other_distances_sq)
sort_indices = torch.cat((class7_indices, other_indices[sort_indices_other])).cpu().numpy()
shooting_queue = [shooting_queue[i] for i in sort_indices]
if shooting_queue:
target = shooting_queue[0]
mouse_worker.queue.put((target.x, target.y, target.w, target.h))
if cfg.show_window and cfg.show_target_line:
draw_target_line(annotated_frame=annotated_frame, screen_x_center=cfg.detection_window_width / 2, screen_y_center=cfg.detection_window_height / 2, target_x=target.x, target_y=target.y + cfg.body_y_offset / target.h)
if cfg.show_overlay_detector and cfg.show_overlay_boxes:
x1, y1 = target.x - target.w / 2, target.y - target.h / 2
x2, y2 = target.x + target.w / 2, target.y + target.h / 2
overlay.canvas.create_rectangle(x1.item(), y1.item(), x2.item(), y2.item(), width=2, outline='green')
if cfg.show_overlay_detector and cfg.show_overlay_line:
overlay.canvas.create_line(cfg.detection_window_width / 2, cfg.detection_window_height / 2, target.x, target.y + cfg.body_y_offset / target.h, width=2, fill='red')
shooting_queue.clear()
else: pass
if cfg.show_window and cfg.show_boxes:
draw_helpers(annotated_frame=annotated_frame, boxes=frame.boxes)
else:
mouse_worker.queue.put(None)
if cfg.show_window and cfg.show_fps:
new_frame_time = time.time()
fps = 1/(new_frame_time-prev_frame_time)
prev_frame_time = new_frame_time
cv2.putText(annotated_frame, f'FPS: {str(int(fps))}', (10, 80) if cfg.show_speed else (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 1, cv2.LINE_AA)
if win32api.GetAsyncKeyState(Keyboard.KEY_CODES.get(cfg.hotkey_exit)) & 0xFF:
if cfg.show_window:
cv2.destroyWindow(cfg.debug_window_name)
frames.Quit()
break
if cfg.show_window:
try:
if cfg.debug_window_scale_percent != 100:
height = int(cfg.detection_window_height * cfg.debug_window_scale_percent / 100)
width = int(cfg.detection_window_width * cfg.debug_window_scale_percent / 100)
dim = (width, height)
cv2.resizeWindow(cfg.debug_window_name, dim)
resised = cv2.resize(annotated_frame, dim, cv2.INTER_NEAREST)
cv2.imshow(cfg.debug_window_name, resised)
else:
cv2.imshow(cfg.debug_window_name, annotated_frame)
except: exit(0)
if cfg.show_window and cv2.waitKey(1) & 0xFF == ord('q'):
break
if __name__ == "__main__":
frames = Capture() | mouse_worker = MouseThread() | 1 | 2023-10-16 11:32:57+00:00 | 8k |
jhejna/cpl | scripts/create_comparison_dataset.py | [
{
"identifier": "ReplayBuffer",
"path": "research/datasets/replay_buffer/buffer.py",
"snippet": "class ReplayBuffer(torch.utils.data.IterableDataset):\n \"\"\"\n Generic Replay Buffer Class.\n\n This class adheres to the following conventions to support multiprocessing:\n 1. Variables/functions starting with \"_\", like \"_help\" are to be used only by the replay buffer internaly. They\n are carefully setup for multiprocesing.\n 2. variables/functions named regularly without a leading \"_\" are to be used by the main thread. This includes\n standard functions like \"add\".\n\n There are a few critical setup options.\n 1. Capacity: determines if the buffer is setup upon creation. If it is set to a known value, then we can add data\n online with `add`, or by pulling more data from disk. If is set to None, the dataset is initialized to the full\n size of the offline dataset.\n 2. path: path to offline data that will be loaded\n 3. _data_generator\n\n Some options are mutually exclusive. For example, it is bad to use a non-distributed layout with\n workers and online data. This will generate a bunch of copy on writes.\n\n Data is expected to be stored in a \"next\" format. This means that data is stored like this:\n s_0, dummy, dummy, dummy\n s_1, a_0 , r_0 , d_0\n s_2, a_1 , r_1 , d_1\n s_3, a_2 , r_2 , d_2 ... End of episode!\n s_0, dummy, dummy, dummy\n s_1, a_0 , r_0 , d_0\n s_2, a_1 , r_1 , d_1\n\n This format is expected from the load(path) funciton.\n\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.Space,\n action_space: gym.Space,\n sample_fn: Union[str, Callable] = \"sample\",\n sample_kwargs: Optional[Dict] = None,\n epoch_ratio: float = 1.0,\n path: Optional[str] = None,\n capacity: Optional[int] = None,\n exclude_keys: Optional[List[str]] = None,\n include_keys: Optional[Dict] = None,\n stacked_obs: bool = False,\n stacked_action: bool = False,\n distributed: bool = False,\n fetch_every: int = 1000,\n cleanup: bool = True,\n ) -> None:\n # Remove stacking if present.\n self.stacked_obs = stacked_obs\n if self.stacked_obs:\n observation_space = remove_stack_dim(observation_space)\n self.stacked_action = stacked_action\n if self.stacked_action:\n action_space = remove_stack_dim(action_space)\n\n self.observation_space = observation_space\n self.action_space = action_space\n\n # Construct the space for the buffer\n self.exclude_keys = [] if exclude_keys is None else exclude_keys # keys to exclude in the storage buffer\n buffer_space = {\n \"obs\": self.observation_space,\n \"action\": self.action_space,\n \"reward\": 0.0,\n \"done\": False,\n \"discount\": 1.0,\n }\n flattened_buffer_space = utils.flatten_dict(buffer_space)\n if include_keys is not None:\n flattened_buffer_space.update(include_keys)\n print(\"FLATTENED BUFFER SPACE\", flattened_buffer_space)\n for k in self.exclude_keys:\n if k in flattened_buffer_space:\n del flattened_buffer_space[k]\n self.buffer_space = utils.nest_dict(flattened_buffer_space)\n\n self.dummy_action = self.action_space.sample()\n self.capacity = capacity\n\n # Setup the sampler\n if isinstance(sample_fn, str):\n sample_fn = vars(sampling)[sample_fn]\n # Use functools partial to override the default args.\n sample_kwargs = {} if sample_kwargs is None else sample_kwargs\n self.sample_fn = functools.partial(sample_fn, **sample_kwargs)\n # Add sampling parameters\n self.epoch_ratio = epoch_ratio\n\n # Path for preloaded data\n self.path = path\n\n # Setup based on distributed value\n self.distributed = distributed\n if self.distributed:\n self.cleanup = cleanup\n self.fetch_every = fetch_every\n if self.capacity is not None:\n self.storage_path = tempfile.mkdtemp(prefix=\"replay_buffer_\")\n print(\"[research] Replay Buffer Storage Path\", self.storage_path)\n self.current_ep = utils.nest_dict({k: list() for k in flattened_buffer_space.keys()})\n self.num_episodes = 0\n else:\n self._alloc(self.capacity) # Alloc immediately\n\n def _alloc(self, capacity):\n # Create the data generator\n self._current_data_generator = self._data_generator()\n\n if capacity is None:\n # Allocte the entire dataset\n data = utils.concatenate(*list(self._current_data_generator), dim=0)\n self._storage = storage.FixedStorage(data)\n else:\n # Construct the buffer space. Remember to exclude any exclude keys\n self._storage = storage.CircularStorage(self.buffer_space, capacity)\n # Fill the storage.\n # if self.path is not None:\n for data in self._current_data_generator:\n self._storage.extend(data)\n if self._storage.size >= self._storage.capacity:\n break\n\n print(\"[ReplayBuffer] Allocated {:.2f} GB\".format(self._storage.bytes / 1024**3))\n\n def _data_generator(self):\n \"\"\"\n Can be overridden in order to load the initial data differently.\n By default assumes the data to be the standard format, and returned as a data dictionary.\n or\n None\n\n This function can be overriden by sub-classes in order to produce data batches.\n It should do the following:\n 1. split data across torch data workers\n 2. randomize the order of data\n 3. yield data of the form dicts\n \"\"\"\n if self.path is None:\n return\n\n # By default get all of the file names that are distributed at the correct index\n worker_info = torch.utils.data.get_worker_info()\n num_workers = 1 if worker_info is None else worker_info.num_workers\n worker_id = 0 if worker_info is None else worker_info.id\n\n ep_filenames = [os.path.join(self.path, f) for f in os.listdir(self.path) if f.endswith(\".npz\")]\n random.shuffle(ep_filenames) # Shuffle all the filenames\n\n if num_workers > 1 and len(ep_filenames) == 1:\n print(\n \"[ReplayBuffer] Warning: using multiple workers but single replay file. Reduce memory usage by sharding\"\n \" data with `save` instead of `save_flat`.\"\n )\n elif num_workers > 1 and len(ep_filenames) < num_workers:\n print(\"[ReplayBuffer] Warning: using more workers than dataset files.\")\n\n for ep_filename in ep_filenames:\n ep_idx, _ = [int(x) for x in os.path.splitext(ep_filename)[0].split(\"_\")[-2:]]\n # Spread loaded data across workers if we have multiple workers and files.\n if ep_idx % num_workers != worker_id and len(ep_filenames) > 1:\n continue # Only yield the files belonging to this worker.\n data = storage.load_data(ep_filename, exclude_keys=self.exclude_keys)\n yield data\n\n def _fetch_offline(self) -> int:\n \"\"\"\n This simple function fetches a new episode from the offline dataset and adds it to the buffer.\n This is done for each worker.\n \"\"\"\n try:\n data = next(self._current_data_generator)\n except StopIteration:\n self._current_data_generator = self._data_generator()\n data = next(self._current_data_generator)\n self._storage.extend(data)\n # Return the fetched size\n return len(data[\"done\"]) # data must have the done key for storage\n\n def _fetch_online(self) -> int:\n worker_info = torch.utils.data.get_worker_info()\n assert worker_info is not None, \"Must use distributed buffer for online fetching.\"\n\n ep_filenames = sorted([os.path.join(self.storage_path, f) for f in os.listdir(self.storage_path)], reverse=True)\n fetched_size = 0\n for ep_filename in ep_filenames:\n ep_idx, ep_len = [int(x) for x in os.path.splitext(ep_filename)[0].split(\"_\")[-2:]]\n if ep_idx % worker_info.num_workers != worker_info.id:\n continue\n if ep_filename in self._episode_filenames:\n break # We found something we have already loaded\n if fetched_size + ep_len > self._storage.capacity:\n break # do not fetch more than the size of the replay buffer\n\n data = storage.load_data(ep_filename, exclude_keys=self.exclude_keys)\n self._storage.extend(data)\n self._episode_filenames.add(ep_filename)\n if self.cleanup:\n try:\n os.remove(ep_filename)\n except OSError:\n pass\n\n return fetched_size\n\n def _get_dummy_transition(self, obs):\n flattened_buffer_space = utils.flatten_dict(self.buffer_space)\n dummy_transition = {\n k: v.sample() if isinstance(v, gym.Space) else v\n for k, v in flattened_buffer_space.items()\n if not k.startswith(\"obs\") and not k.startswith(\"action\")\n }\n dummy_transition = utils.nest_dict(dummy_transition)\n dummy_transition[\"obs\"] = obs\n dummy_transition[\"action\"] = self.dummy_action\n return dummy_transition\n\n def _reset_current_ep(self):\n ep_idx = self.num_episodes\n ep_len = len(self.current_ep[\"done\"])\n self.num_episodes += 1\n ts = datetime.datetime.now().strftime(\"%Y%m%dT%H%M%S\")\n ep_filename = f\"{ts}_{ep_idx}_{ep_len}.npz\"\n storage.save_data(self.current_ep, os.path.join(self.storage_path, ep_filename))\n\n flattened_buffer_space = utils.flatten_dict(self.buffer_space)\n ep = {k: list() for k in flattened_buffer_space.keys()}\n self.current_ep = utils.nest_dict(ep)\n\n def add(self, **kwargs):\n assert self.capacity is not None, \"Tried to extend to a static size buffer.\"\n # Preprocess here before adding to storage\n if len(kwargs) == 1:\n assert \"obs\" in kwargs\n kwargs = self._get_dummy_transition(kwargs[\"obs\"])\n if self.stacked_obs:\n kwargs[\"obs\"] = utils.get_from_batch(kwargs[\"obs\"], -1)\n else:\n # We have a full transitions\n if self.stacked_obs:\n kwargs[\"obs\"] = utils.get_from_batch(kwargs[\"obs\"], -1)\n if self.stacked_action:\n kwargs[\"action\"] = utils.get_from_batch(kwargs[\"action\"], -1)\n\n assert \"done\" in kwargs, \"Need done key for ReplayBuffer\"\n\n # This function is overwritten for distributed / local buffers\n if self.distributed:\n # Add to the current thread, and dump to disk\n utils.append(self.current_ep, kwargs)\n if kwargs[\"done\"]:\n self._reset_current_ep()\n else:\n # Add directly\n self._learning_online = True\n self._storage.add(kwargs)\n\n def extend(self, **kwargs):\n assert \"done\" in kwargs, \"Need done key for ReplayBuffer\"\n assert self.capacity is not None, \"Tried to extend to a static size buffer.\"\n # TODO: There is a chance that if we add a full sequence we will end up with (B, T, stack, ...)\n # which is not what we want. We could compare the shapes of the observation space to fix it\n # but this code might be unnecesary, as this class shouldn't really be used like that anyways.\n if self.distributed:\n # Add to the current thread, and dump to disk\n utils.extend(self.current_ep, kwargs)\n if kwargs[\"done\"][-1]:\n self._reset_current_ep()\n else:\n # Add directly\n self._learning_online = True\n self._storage.extend(kwargs)\n\n def save(self, path):\n os.makedirs(path, exist_ok=True)\n if self.distributed:\n if self.cleanup:\n print(\"[research] Warning, attempting to save a cleaned up replay buffer. There are likely no files\")\n srcs = os.listdir(self.storage_path)\n for src in srcs:\n shutil.move(os.path.join(self.storage_path, src), os.path.join(path, src))\n print(\"Successfully saved\", len(srcs), \"episodes.\")\n else:\n ep_len = self._storage.size\n ep_idx = 0\n ts = datetime.datetime.now().strftime(\"%Y%m%dT%H%M%S\")\n ep_filename = f\"{ts}_{ep_idx}_{ep_len}.npz\"\n save_path = os.path.join(path, ep_filename)\n self._storage.save(save_path)\n\n def sample(self, *args, **kwargs):\n return self.sample_fn(self._storage, *args, **kwargs)\n\n def __iter__(self):\n assert not hasattr(self, \"_iterated\"), \"__iter__ called twice!\"\n self._iterated = True\n worker_info = torch.utils.data.get_worker_info()\n assert (worker_info is not None) == self.distributed, \"ReplayBuffer.distributed not set correctly!\"\n\n # allocate the buffer with the given capacity\n if self.distributed:\n self._alloc(None if self.capacity is None else self.capacity // worker_info.num_workers)\n self._episode_filenames = set()\n\n self._learning_online = False\n\n samples_since_last_offline_fetch = 0\n samples_since_last_online_fetch = 0\n last_offline_fetch_size = 0\n\n batch_size = self.sample_fn.keywords.get(\"batch_size\", 1)\n stack_size = self.sample_fn.keywords.get(\"stack\", 1)\n seq_size = self.sample_fn.keywords.get(\"seq_length\", 1)\n\n while True:\n if self._storage.size < seq_size * stack_size + 1:\n yield {} # If the buffer is too small for sampling, continue.\n else:\n sample = self.sample_fn(self._storage)\n if batch_size == 1:\n sample = utils.squeeze(sample, 0)\n yield sample\n\n # Fetch new data if we have a circular buffer.\n if isinstance(self._storage, storage.CircularStorage):\n if self.distributed: # Always check for online data\n # We fetch from the online buffer\n samples_since_last_online_fetch += 1\n if samples_since_last_online_fetch >= self.fetch_every:\n fetch_size = self._fetch_online()\n self._learning_online = self._learning_online or (fetch_size > 0)\n samples_since_last_online_fetch = 0\n\n if not self._learning_online and self.path is not None:\n # We fetch from the offline buffer\n samples_since_last_offline_fetch += 1\n data_pts_since_last_offline_fetch = (\n samples_since_last_offline_fetch * batch_size * seq_size * stack_size\n )\n if data_pts_since_last_offline_fetch >= last_offline_fetch_size * self.epoch_ratio:\n last_offline_fetch_size = self._fetch_offline()\n samples_since_last_offline_fetch = 0\n\n def __del__(self):\n if not self.distributed:\n return\n if self.cleanup:\n return\n else:\n paths = [os.path.join(self.storage_path, f) for f in os.listdir(self.storage_path)]\n for path in paths:\n try:\n os.remove(path)\n except OSError:\n pass\n try:\n os.rmdir(self.storage_path)\n except OSError:\n pass"
},
{
"identifier": "utils",
"path": "research/utils/utils.py",
"snippet": "def to_device(batch: Any, device: torch.device) -> Any:\ndef to_tensor(batch: Any) -> Any:\ndef to_np(batch: Any) -> Any:\ndef remove_float64(batch: Any):\ndef unsqueeze(batch: Any, dim: int) -> Any:\ndef squeeze(batch: Any, dim: int) -> Any:\ndef get_from_batch(batch: Any, start: Union[int, np.ndarray, torch.Tensor], end: Optional[int] = None) -> Any:\ndef set_in_batch(batch: Any, value: Any, start: int, end: Optional[int] = None) -> None:\ndef batch_copy(batch: Any) -> Any:\ndef space_copy(space: gym.Space):\ndef contains_tensors(batch: Any) -> bool:\ndef get_device(batch: Any) -> Optional[torch.device]:\ndef concatenate(*args, dim: int = 0):\ndef append(lst, item):\ndef extend(lst1, lst2):\n def __init__(self, name: str = \"\"):\n def forward(self, x: Any) -> Any:\ndef np_dataset_alloc(\n space: gym.Space, capacity: int, begin_pad: Tuple[int] = tuple(), end_pad: Tuple[int] = tuple()\n) -> np.ndarray:\ndef np_bytes_per_instance(space: gym.Space) -> int:\ndef _flatten_dict_helper(flat_dict: Dict, value: Any, prefix: str, separator: str = \".\") -> None:\ndef flatten_dict(d: Dict, separator: str = \".\") -> Dict:\ndef nest_dict(d: Dict, separator: str = \".\") -> Dict:\ndef fetch_from_dict(d: Dict, keys: Union[str, List, Tuple], separator=\".\") -> List[Any]:\ndef create_optim_groups(params, kwargs):\nclass PrintNode(torch.nn.Module):"
},
{
"identifier": "Config",
"path": "research/utils/config.py",
"snippet": "class Config(BareConfig):\n def __init__(self):\n super().__init__()\n # Define necesary fields\n\n # Manage seeding.\n self._seeded = False\n self.config[\"seed\"] = None\n\n # Env Args\n self.config[\"env\"] = None\n self.config[\"env_kwargs\"] = {}\n\n self.config[\"eval_env\"] = None\n self.config[\"eval_env_kwargs\"] = {}\n\n self.config[\"wrapper\"] = None\n self.config[\"wrapper_kwargs\"] = {}\n\n # Algorithm Args\n self.config[\"alg\"] = None\n self.config[\"alg_kwargs\"] = {}\n\n # Dataset Args\n self.config[\"dataset\"] = None\n self.config[\"dataset_kwargs\"] = {}\n\n self.config[\"validation_dataset\"] = None\n self.config[\"validation_dataset_kwargs\"] = None\n\n # Processor arguments\n self.config[\"processor\"] = None\n self.config[\"processor_kwargs\"] = {}\n\n # Optimizer Args\n self.config[\"optim\"] = None\n self.config[\"optim_kwargs\"] = {}\n\n # Network Args\n self.config[\"network\"] = None\n self.config[\"network_kwargs\"] = {}\n\n # Checkpoint\n self.config[\"checkpoint\"] = None\n\n # Schedule args\n self.config[\"schedule\"] = None\n self.config[\"schedule_kwargs\"] = {}\n\n self.config[\"trainer_kwargs\"] = {}\n\n @property\n def parsed(self):\n return self._parsed\n\n @staticmethod\n def _parse_helper(d: Dict) -> None:\n for k, v in d.items():\n if isinstance(v, list) and len(v) > 1 and v[0] == \"import\":\n # parse the value to an import\n d[k] = getattr(importlib.import_module(v[1]), v[2])\n elif isinstance(v, dict):\n Config._parse_helper(v)\n\n def parse(self) -> \"Config\":\n config = self.copy()\n Config._parse_helper(config.config)\n config._parsed = True\n # Before we make any objects, make sure we set the seeds.\n if self.config[\"seed\"] is not None:\n torch.manual_seed(self.config[\"seed\"])\n np.random.seed(self.config[\"seed\"])\n random.seed(self.config[\"seed\"])\n return config\n\n def flatten(self, separator=\".\") -> Dict:\n \"\"\"Returns a flattened version of the config where '.' separates nested values\"\"\"\n return utils.flatten_dict(self.config, separator=separator)\n\n def __setitem__(self, key: str, value: Any):\n if key not in self.config:\n raise ValueError(\n \"Attempting to set an out of structure key: \" + key + \". Configs must follow the format in config.py\"\n )\n super().__setitem__(key, value)\n\n def get_train_env_fn(self):\n \"\"\"\n Returns a function that generates a training environment, or None if no training environment is used.\n \"\"\"\n assert self.parsed\n if self[\"env\"] is None:\n return None\n else:\n return functools.partial(\n get_env,\n env=self[\"env\"],\n env_kwargs=self[\"env_kwargs\"],\n wrapper=self[\"wrapper\"],\n wrapper_kwargs=self[\"wrapper_kwargs\"],\n )\n\n def get_eval_env_fn(self):\n \"\"\"\n Returns a function that generates an evaluation environment.\n Will always return an environment.\n \"\"\"\n assert self.parsed\n # Return the evalutaion environment.\n if self[\"eval_env\"] is None:\n env, env_kwargs = self[\"env\"], self[\"env_kwargs\"]\n else:\n env, env_kwargs = self[\"eval_env\"], self[\"eval_env_kwargs\"]\n return functools.partial(\n get_env, env=env, env_kwargs=env_kwargs, wrapper=self[\"wrapper\"], wrapper_kwargs=self[\"wrapper_kwargs\"]\n )\n\n def get_spaces(self):\n # Try to get the spaces. Eval env will always return a space.\n dummy_env = self.get_eval_env_fn()() # Call the function.\n observation_space = utils.space_copy(dummy_env.observation_space)\n action_space = utils.space_copy(dummy_env.action_space)\n dummy_env.close()\n del dummy_env\n gc.collect()\n return observation_space, action_space\n\n def get_model(\n self,\n observation_space: Optional[gym.Space] = None,\n action_space: Optional[gym.Space] = None,\n device: Union[str, torch.device] = \"auto\",\n ):\n assert self.parsed\n\n if observation_space is None or action_space is None:\n observation_space, action_space = self.get_spaces()\n\n # This function returns the model\n alg_class = vars(research.algs)[self[\"alg\"]]\n dataset_class = None if self[\"dataset\"] is None else vars(research.datasets)[self[\"dataset\"]]\n validation_dataset_class = (\n None if self[\"validation_dataset\"] is None else vars(research.datasets)[self[\"validation_dataset\"]]\n )\n network_class = None if self[\"network\"] is None else vars(research.networks)[self[\"network\"]]\n optim_class = None if self[\"optim\"] is None else vars(torch.optim)[self[\"optim\"]]\n processor_class = None if self[\"processor\"] is None else vars(research.processors)[self[\"processor\"]]\n\n # Fetch the schedulers. If we don't have an schedulers dict, change it to one.\n if not isinstance(self[\"schedule\"], dict):\n schedulers_class = {DEFAULT_NETWORK_KEY: self[\"schedule\"]}\n schedulers_kwargs = {DEFAULT_NETWORK_KEY: self[\"schedule_kwargs\"]}\n else:\n schedulers_class = self[\"schedule\"]\n schedulers_kwargs = self[\"schedule_kwargs\"]\n\n # Make sure we fetch the schedule if its provided as a string\n for k in schedulers_class.keys():\n if isinstance(schedulers_class[k], str):\n # Create the lambda function, and pass it in as a keyword arg\n schedulers_kwargs[k] = dict(lr_lambda=vars(schedules)[schedulers_class[k]](**schedulers_kwargs[k]))\n schedulers_class[k] = torch.optim.lr_scheduler.LambdaLR\n\n algo = alg_class(\n observation_space,\n action_space,\n network_class,\n dataset_class,\n network_kwargs=self[\"network_kwargs\"],\n dataset_kwargs=self[\"dataset_kwargs\"],\n validation_dataset_class=validation_dataset_class,\n validation_dataset_kwargs=self[\"validation_dataset_kwargs\"],\n processor_class=processor_class,\n processor_kwargs=self[\"processor_kwargs\"],\n optim_class=optim_class,\n optim_kwargs=self[\"optim_kwargs\"],\n schedulers_class=schedulers_class,\n schedulers_kwargs=schedulers_kwargs,\n checkpoint=self[\"checkpoint\"],\n device=device,\n **self[\"alg_kwargs\"],\n )\n return algo\n\n def get_trainer(\n self,\n model=None,\n observation_space: Optional[gym.Space] = None,\n action_space: Optional[gym.Space] = None,\n device: Union[str, torch.device] = \"auto\",\n ):\n if model is None:\n if observation_space is None or action_space is None:\n observation_space, action_space = self.get_spaces()\n model = self.get_model(observation_space=observation_space, action_space=action_space, device=device)\n train_env_fn = self.get_train_env_fn()\n eval_env_fn = self.get_eval_env_fn()\n # Return the trainer...\n return Trainer(model, train_env_fn, eval_env_fn, **self[\"trainer_kwargs\"])"
}
] | import argparse
import collections
import io
import os
import numpy as np
from research.datasets import ReplayBuffer
from research.utils import utils
from research.utils.config import Config | 6,454 |
if __name__ == "__main__":
# This is a short script that generates a pairwise preference dataset from a ReplayBuffer
parser = argparse.ArgumentParser()
parser.add_argument("--path", type=str, required=True, help="Path to the ReplayBuffer")
parser.add_argument("--output", type=str, required=True, help="Output path for the dataset")
parser.add_argument("--size", type=int, default=20000, help="How many data points to sample")
parser.add_argument("--segment-size", type=int, default=64, help="How large to make segments")
parser.add_argument("--checkpoint", type=str, required=True, help="Path to oracle model")
args = parser.parse_args()
# Get the model
config_path = os.path.dirname(args.checkpoint) if args.checkpoint.endswith(".pt") else args.checkpoint
config = Config.load(config_path)
config["checkpoint"] = None # Set checkpoint to None, we don't actually need to load it.
config = config.parse()
env_fn = config.get_train_env_fn()
if env_fn is None:
env_fn = config.get_eval_env_fn()
env = env_fn()
# Load the data
assert os.path.exists(args.path)
replay_buffer = ReplayBuffer(
env.observation_space, env.action_space, distributed=False, path=args.path, sample_fn="sample_sequence"
)
data = []
scores = collections.defaultdict(list)
batch_size = 256
remaining_data_points = args.size
# sample num segments...
while remaining_data_points > 0:
sample_size = min(batch_size, remaining_data_points)
batch = replay_buffer.sample(
batch_size=sample_size,
sample_by_timesteps=True,
seq_length=args.segment_size,
pad=0,
seq_keys=("obs", "action", "reward", "state", "timestep"),
)
del batch["mask"]
data.append(batch)
remaining_data_points -= sample_size
assert remaining_data_points == 0, "Must have zero remaining segments"
|
if __name__ == "__main__":
# This is a short script that generates a pairwise preference dataset from a ReplayBuffer
parser = argparse.ArgumentParser()
parser.add_argument("--path", type=str, required=True, help="Path to the ReplayBuffer")
parser.add_argument("--output", type=str, required=True, help="Output path for the dataset")
parser.add_argument("--size", type=int, default=20000, help="How many data points to sample")
parser.add_argument("--segment-size", type=int, default=64, help="How large to make segments")
parser.add_argument("--checkpoint", type=str, required=True, help="Path to oracle model")
args = parser.parse_args()
# Get the model
config_path = os.path.dirname(args.checkpoint) if args.checkpoint.endswith(".pt") else args.checkpoint
config = Config.load(config_path)
config["checkpoint"] = None # Set checkpoint to None, we don't actually need to load it.
config = config.parse()
env_fn = config.get_train_env_fn()
if env_fn is None:
env_fn = config.get_eval_env_fn()
env = env_fn()
# Load the data
assert os.path.exists(args.path)
replay_buffer = ReplayBuffer(
env.observation_space, env.action_space, distributed=False, path=args.path, sample_fn="sample_sequence"
)
data = []
scores = collections.defaultdict(list)
batch_size = 256
remaining_data_points = args.size
# sample num segments...
while remaining_data_points > 0:
sample_size = min(batch_size, remaining_data_points)
batch = replay_buffer.sample(
batch_size=sample_size,
sample_by_timesteps=True,
seq_length=args.segment_size,
pad=0,
seq_keys=("obs", "action", "reward", "state", "timestep"),
)
del batch["mask"]
data.append(batch)
remaining_data_points -= sample_size
assert remaining_data_points == 0, "Must have zero remaining segments"
| data = utils.concatenate(*data, dim=0) | 1 | 2023-10-19 17:25:45+00:00 | 8k |
nbasyl/LLM-FP4 | lm_eval/models/huggingface.py | [
{
"identifier": "utils",
"path": "lm_eval/utils.py",
"snippet": "class ExitCodeError(Exception):\nclass MultiChoice:\nclass Reorderer:\ndef sh(x):\ndef escaped_split(text, sep_char, maxsplit=-1):\ndef simple_parse_args_string(args_string):\ndef join_iters(iters):\ndef chunks(iter, n=0, fn=None):\ndef group(arr, fn):\ndef _is_json_task(task_name):\n def __init__(self, choices):\n def __contains__(self, values):\n def __iter__(self):\ndef pattern_match(patterns, source_list):\ndef general_detokenize(string):\ndef get_rolling_token_windows(token_list, prefix_token, max_seq_len, context_len):\ndef make_disjoint_window(pair):\ndef select_continuation_from_batch_left_padding(\n generations: Union[List[List[int]], torch.Tensor], max_context_size: int\n):\n def __init__(self, arr, fn):\n def get_reordered(self):\n def get_original(self, newarr):\ndef positional_deprecated(fn):\n def _wrapper(*args, **kwargs):\ndef find_test_root(start_path: pathlib.Path) -> pathlib.Path:\ndef run_task_tests(task_list: List[str]):\ndef clear_torch_cache():"
},
{
"identifier": "BaseLM",
"path": "lm_eval/base.py",
"snippet": "class BaseLM(LM):\n def __init__(self):\n super().__init__()\n self.batch_schedule = 1\n self.batch_sizes = {}\n self.max_batch_size = 512\n\n @property\n @abstractmethod\n def eot_token_id(self):\n pass\n\n @property\n @abstractmethod\n def max_length(self):\n pass\n\n @property\n @abstractmethod\n def max_gen_toks(self):\n pass\n\n @property\n @abstractmethod\n def batch_size(self):\n pass\n\n @property\n @abstractmethod\n def device(self):\n pass\n\n @abstractmethod\n def tok_encode(self, string: str):\n pass\n\n @abstractmethod\n def tok_decode(self, tokens: Iterable[int]):\n pass\n\n @abstractmethod\n def _model_generate(self, context, max_length, eos_token_id):\n pass\n\n @abstractmethod\n def _model_call(self, inps):\n \"\"\"\n inps: a torch tensor of shape [batch, sequence]\n the size of sequence may vary from call to call\n\n returns: a torch tensor of shape [batch, sequence, vocab] with the\n logits returned from the model\n \"\"\"\n pass\n\n def _detect_batch_size(self, requests=None, pos=0):\n if requests:\n _, context_enc, continuation_enc = requests[pos]\n max_length = len((context_enc + continuation_enc)[-(self.max_length + 1) :][:-1])\n else:\n max_length = self.max_length\n\n # if OOM, then halves batch_size and tries again\n @find_executable_batch_size(starting_batch_size=self.max_batch_size)\n def forward_batch(batch_size):\n test_batch = torch.ones((batch_size, max_length), device=self.device).long()\n for _ in range(5):\n _ = F.log_softmax(self._model_call(test_batch), dim=-1).cpu()\n return batch_size\n\n batch_size = forward_batch()\n utils.clear_torch_cache()\n\n return batch_size\n\n # subclass must implement properties vocab_size, eot_token_id, max_gen_toks, batch_size, device, max_length.\n # TODO: enforce this somehow\n\n def _encode_pair(self, context, continuation):\n n_spaces = len(context) - len(context.rstrip())\n if n_spaces > 0:\n continuation = context[-n_spaces:] + continuation\n context = context[:-n_spaces]\n whole_enc = self.tok_encode(context + continuation)\n context_enc = self.tok_encode(context)\n context_enc_len = len(context_enc)\n continuation_enc = whole_enc[context_enc_len:]\n return context_enc, continuation_enc\n\n def loglikelihood(self, requests):\n new_reqs = []\n for context, continuation in requests:\n if context == \"\":\n # end of text as context\n context_enc, continuation_enc = [self.eot_token_id], self.tok_encode(continuation)\n else:\n context_enc, continuation_enc = self._encode_pair(context, continuation)\n\n new_reqs.append(((context, continuation), context_enc, continuation_enc))\n\n return self._loglikelihood_tokens(new_reqs)\n\n def loglikelihood_rolling(self, requests):\n # TODO: Implement caching once we've confirmed the perplexity implementation\n\n # automatic batch size detection for vectorization\n adaptive_batch_size = None\n if self.batch_size == \"auto\":\n # using rolling window with maximum context\n print(\"Passed argument batch_size = auto. Detecting largest batch size\")\n batch_size = self._detect_batch_size()\n print(f\"Determined Largest batch size: {batch_size}\")\n adaptive_batch_size = batch_size\n\n loglikelihoods = []\n for (string,) in tqdm(requests):\n rolling_token_windows = list(\n map(\n utils.make_disjoint_window,\n utils.get_rolling_token_windows(\n token_list=self.tok_encode(string),\n prefix_token=self.eot_token_id,\n max_seq_len=self.max_length,\n context_len=1,\n ),\n )\n )\n\n rolling_token_windows = [(None,) + x for x in rolling_token_windows]\n\n # TODO: extract out this call so it only gets called once and also somehow figure out partial caching for\n # that\n string_nll = self._loglikelihood_tokens(\n rolling_token_windows,\n disable_tqdm=True,\n override_bs=adaptive_batch_size,\n )\n\n # discard is_greedy\n string_nll = [x[0] for x in string_nll]\n\n string_nll = sum(string_nll)\n loglikelihoods.append(string_nll)\n\n return loglikelihoods\n\n def _loglikelihood_tokens(self, requests, disable_tqdm=False, override_bs=None):\n # TODO: implement some kind of efficient-request-middleware that lumps together requests with the same context\n res = []\n\n def _collate(x):\n # the negative sign on len(toks) sorts descending - this has a few advantages:\n # - time estimates will always be over not underestimates, which is more useful for planning\n # - to know the size of a batch when going through the list, you know the first one is always the batch\n # padded context length. this is useful to simplify the batching logic and more importantly to make\n # automatic adaptive batches much much easier to implement\n # - any OOMs will happen right away rather than near the end\n\n toks = x[1] + x[2]\n return -len(toks), tuple(toks)\n\n re_ord = utils.Reorderer(requests, _collate)\n\n reordered_requests = re_ord.get_reordered()\n n_reordered_requests = len(reordered_requests)\n\n # automatic (variable) batch size detection for vectorization\n # pull longest context sample from request\n def _batch_scheduler(pos):\n sched = pos // int(n_reordered_requests / self.batch_schedule)\n if sched in self.batch_sizes:\n return self.batch_sizes[sched]\n print(f\"Passed argument batch_size = auto:{self.batch_schedule}. Detecting largest batch size\")\n self.batch_sizes[sched] = self._detect_batch_size(reordered_requests, pos)\n print(f\"Determined largest batch size: {self.batch_sizes[sched]}\")\n return self.batch_sizes[sched]\n\n for chunk in utils.chunks(\n tqdm(reordered_requests, disable=disable_tqdm),\n n=self.batch_size if self.batch_size != \"auto\" else override_bs if override_bs is not None else 0,\n fn=_batch_scheduler if self.batch_size == \"auto\" and n_reordered_requests > 0 else None,\n ):\n inps = []\n cont_toks_list = []\n inplens = []\n\n padding_length = None\n\n # because vectorizing is annoying, we first convert each (context, continuation) pair to padded\n # tensors, then we pack them together into a batch, call the model, and then pick it all apart\n # again because vectorizing is annoying\n\n for _, context_enc, continuation_enc in chunk:\n # sanity check\n assert len(context_enc) > 0\n assert len(continuation_enc) > 0\n assert len(continuation_enc) <= self.max_length\n\n # how this all works:\n # CTX CONT\n # inp 0 1 2 3|4 5 6 7 8 9 <- last token is deleted by inp[:, :-1]\n # gpt2 \\ \\\n # logits 1 2 3|4 5 6 7 8 9 <- the ctx half gets tossed out by the\n # cont_toks 4 5 6 7 8 9 [:, -len(continuation_enc):, :self.vocab_size] slice\n\n # when too long to fit in context, truncate from the left\n inp = torch.tensor(\n (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1],\n dtype=torch.long,\n ).to(self.device)\n (inplen,) = inp.shape\n\n cont = continuation_enc\n\n # since in _collate we make sure length is descending, the longest is always the first one.\n padding_length = (\n padding_length if padding_length is not None else inplen\n )\n\n # pad length from seq to padding_length\n inp = torch.cat(\n [\n inp, # [seq]\n torch.zeros(padding_length - inplen, dtype=torch.long).to(\n inp.device\n ), # [padding_length - seq]\n ],\n dim=0,\n )\n\n inps.append(inp.unsqueeze(0)) # [1, padding_length]\n cont_toks_list.append(cont)\n inplens.append(inplen)\n\n batched_inps = torch.cat(inps, dim=0) # [batch, padding_length\n multi_logits = F.log_softmax(\n self._model_call(batched_inps), dim=-1\n ).cpu() # [batch, padding_length, vocab]\n\n for (cache_key, _, _), logits, inp, inplen, cont_toks in zip(\n chunk, multi_logits, inps, inplens, cont_toks_list\n ):\n\n # Slice to original seq length\n contlen = len(cont_toks)\n logits = logits[inplen - contlen : inplen].unsqueeze(\n 0\n ) # [1, seq, vocab]\n\n # Check if per-token argmax is exactly equal to continuation\n greedy_tokens = logits.argmax(dim=-1)\n cont_toks = torch.tensor(cont_toks, dtype=torch.long).unsqueeze(\n 0\n ) # [1, seq]\n max_equal = (greedy_tokens == cont_toks).all()\n\n # Obtain log-probs at the corresponding continuation token indices\n # last_token_slice = logits[:, -1, :].squeeze(0).tolist()\n logits = torch.gather(logits, 2, cont_toks.unsqueeze(-1)).squeeze(\n -1\n ) # [1, seq]\n\n # Answer: (log prob, is-exact-match)\n answer = (float(logits.sum()), bool(max_equal))\n\n # partial caching\n if cache_key is not None:\n self.cache_hook.add_partial(\"loglikelihood\", cache_key, answer)\n\n res.append(answer)\n\n return re_ord.get_original(res)\n\n def greedy_until(self, requests):\n # TODO: implement fully general `until` that handles until that are\n # multiple tokens or that span multiple tokens correctly\n\n # TODO: extract to TokenizedLM?\n res = []\n\n def _collate(x):\n toks = self.tok_encode(x[0])\n return len(toks), x[0]\n\n re_ord = utils.Reorderer(requests, _collate)\n\n for context, request_args in tqdm(re_ord.get_reordered()):\n until = request_args[\"until\"]\n if isinstance(until, str):\n until = [until]\n\n if until:\n (primary_until,) = self.tok_encode(until[0])\n else:\n primary_until = None\n\n context_enc = torch.tensor(\n [self.tok_encode(context)[self.max_gen_toks - self.max_length :]]\n ).to(self.device)\n\n max_gen_tokens = min(\n self.max_gen_toks, request_args.get(\"max_length\", self.max_gen_toks)\n )\n cont = self._model_generate(\n context_enc, context_enc.shape[1] + max_gen_tokens, primary_until\n )\n\n s = self.tok_decode(cont[0].tolist()[context_enc.shape[1] :])\n\n for term in until:\n s = s.split(term)[0]\n\n # partial caching\n self.cache_hook.add_partial(\"greedy_until\", (context, until), s)\n\n res.append(s)\n\n return re_ord.get_original(res)"
}
] | import math
import torch
import torch.nn.functional as F
import transformers
import peft
from peft import __version__ as PEFT_VERSION
from pathlib import Path
from typing import List, Mapping, NewType, Optional, Tuple, Union
from tqdm import tqdm
from transformers import BatchEncoding
from lm_eval import utils
from lm_eval.base import BaseLM
from auto_gptq import AutoGPTQForCausalLM | 3,676 |
TokenSequence = Union[List[int], torch.LongTensor, torch.Tensor, BatchEncoding]
_DeviceMapping = NewType("DeviceMapping", Mapping[str, Union[int, str, torch.device]])
def _get_accelerate_args(
device_map_option: Optional[str] = "auto",
max_memory_per_gpu: Optional[Union[int, str]] = None,
max_cpu_memory: Optional[Union[int, str]] = None,
offload_folder: Optional[str] = "./offload",
) -> dict:
"""Returns the kwargs needed to apply `accelerate` in `AutoModel.from_pretrained`."""
max_memory = {}
if max_memory_per_gpu is not None:
max_memory_per_gpu_map = {
device_idx: max_memory_per_gpu
for device_idx in range(torch.cuda.device_count())
}
max_memory.update(max_memory_per_gpu_map)
if max_cpu_memory is not None:
max_memory["cpu"] = max_cpu_memory
args = {}
if max_memory:
args["max_memory"] = max_memory
args["device_map"] = device_map_option
args["offload_folder"] = offload_folder
return args
def _get_dtype(
dtype: Union[str, torch.dtype], config: Optional[transformers.AutoConfig] = None
) -> torch.dtype:
"""Converts `dtype` from `str` to torch.dtype when possible."""
if dtype is None and config is not None:
_torch_dtype = config.torch_dtype
elif isinstance(dtype, str) and dtype != "auto":
# Convert `str` args torch dtype: `float16` -> `torch.float16`
_torch_dtype = getattr(torch, dtype)
else:
_torch_dtype = dtype
return _torch_dtype
|
TokenSequence = Union[List[int], torch.LongTensor, torch.Tensor, BatchEncoding]
_DeviceMapping = NewType("DeviceMapping", Mapping[str, Union[int, str, torch.device]])
def _get_accelerate_args(
device_map_option: Optional[str] = "auto",
max_memory_per_gpu: Optional[Union[int, str]] = None,
max_cpu_memory: Optional[Union[int, str]] = None,
offload_folder: Optional[str] = "./offload",
) -> dict:
"""Returns the kwargs needed to apply `accelerate` in `AutoModel.from_pretrained`."""
max_memory = {}
if max_memory_per_gpu is not None:
max_memory_per_gpu_map = {
device_idx: max_memory_per_gpu
for device_idx in range(torch.cuda.device_count())
}
max_memory.update(max_memory_per_gpu_map)
if max_cpu_memory is not None:
max_memory["cpu"] = max_cpu_memory
args = {}
if max_memory:
args["max_memory"] = max_memory
args["device_map"] = device_map_option
args["offload_folder"] = offload_folder
return args
def _get_dtype(
dtype: Union[str, torch.dtype], config: Optional[transformers.AutoConfig] = None
) -> torch.dtype:
"""Converts `dtype` from `str` to torch.dtype when possible."""
if dtype is None and config is not None:
_torch_dtype = config.torch_dtype
elif isinstance(dtype, str) and dtype != "auto":
# Convert `str` args torch dtype: `float16` -> `torch.float16`
_torch_dtype = getattr(torch, dtype)
else:
_torch_dtype = dtype
return _torch_dtype
| class HuggingFaceAutoLM(BaseLM): | 1 | 2023-10-15 06:05:13+00:00 | 8k |
alextamkin/generative-elicitation | pool_based_agent.py | [
{
"identifier": "BaseActiveLearningAgent",
"path": "base_active_learning_agent.py",
"snippet": "class BaseActiveLearningAgent(ABC):\n \n def __init__(self, target_specification_file, engine, openai_cache_file=None, **kwargs):\n self.get_gold_domain_info(target_specification_file)\n self.engine = engine\n self.openai_cache_file = openai_cache_file\n self.openai_cache = load_openai_cache(openai_cache_file)\n self.temperature = kwargs.get(\"temperature\", 0.0)\n\n self.interaction_history = []\n\n\n def get_gold_domain_info(self, target_specification_file):\n '''Gets the gold domain specification that the model should try to learn and other associated information.\n '''\n gold_task = json.load(open(target_specification_file)) #\"sample_tests.json\"\n for key in gold_task:\n setattr(self, key, gold_task[key])\n if key == \"regex\":\n self.gold_regex_text = self.regex\n self.gold_regex = re.compile(self.gold_regex_text)\n self.persona_text = self.persona\n\n def get_task_description(self):\n return \"validate an email address adheres to a specific format\"\n\n @staticmethod\n def format_questions_and_answers(questions_and_answers):\n '''Formats the questions and answers into a string.\n\n Looks like:\n - Should the system allow numbers in the domain? -> Yes\n\n Args:\n questions_and_answers (list): A list of tuples of the form (question, answer).\n \n Returns:\n str: The formatted questions and answers.\n '''\n return '\\n'.join([f\"- {question} -> {answer}\" for question, answer in questions_and_answers])\n\n def get_test_case_prompt(self, interaction_history, test_case):\n hypothesis_prompt = textwrap.dedent('''\\\n {single_instance_prompt1}\n {previous_examples}\n \n {single_instance_prompt2}\n {test_case}\n '''\n ).format(\n single_instance_prompt1=self.test_case_prompt[0],\n previous_examples=self.format_questions_and_answers(interaction_history),\n single_instance_prompt2=self.test_case_prompt[1],\n test_case=test_case,\n )\n return [{\"role\": \"user\", \"content\": hypothesis_prompt}]\n \n def generate_test_case_answer(self, test_case):\n test_case_messages = self.get_test_case_prompt(self.interaction_history, test_case)\n test_case_answer, _ = query_api(test_case_messages, self.engine, self.openai_cache, self.openai_cache_file)\n test_case_answer = test_case_answer.strip().lower()\n \n return test_case_answer\n\n def score_test_cases_direct(self, start_metrics=None):\n \"\"\"\n Condition on query answers directly to score the test cases.\n start_metrics (dict): metrics at the start of the interaction, set to None if computing absolute metrics, else compute relative metrics\n \n Returns:\n Tuple[Dict, List[Dict]]: A tuple of the following:\n Dict: scores (dict): A dictionary containing the accuracy and F1 score of the answers on the test cases.\n accuracy (float): The accuracy of the answers on the test cases.\n AUCROC (float): The AUCROC score of the answers on the test cases.\n correct_prob (float): The probability on the correct answer.\n List[Dict]: all_test_details (list): A list of dictionaries containing the details of each test case.\n \"\"\"\n # Query Asynchronous API\n all_test_case_messages = []\n test_case_to_answer = {}\n for test_case in self.test_cases:\n # test_case: tuple of (query, answer)\n test_case_messages = self.get_test_case_prompt(self.interaction_history, test_case[0])\n all_test_case_messages.append(test_case_messages)\n answer, _ = query_api(test_case_messages, self.engine, self.openai_cache, self.openai_cache_file)\n test_case_to_answer[json.dumps(test_case_messages)] = answer.strip().lower()\n\n # Compute Accuracy and AUCROC and correct_prob\n tests_passed = []\n all_test_details = []\n pred_probs = []\n correct_probs = []\n for test_case_message, test_case in zip(all_test_case_messages, self.test_cases):\n while True:\n try:\n pred_prob = float(test_case_to_answer[json.dumps(test_case_message)].strip().lower())\n break\n except:\n test_case_message.append({'role': 'user', 'content': 'Please make your best guess as to a probability. Output the probability and nothing else.'})\n pred_prob, _ = query_api(test_case_message, self.engine, self.openai_cache, self.openai_cache_file)\n test_case_to_answer[json.dumps(test_case_message)] = pred_prob\n pred_probs.append(pred_prob)\n pred_answer = 1 if pred_prob > 0.5 else 0\n actual_answer = 1 if test_case[1] else 0\n tests_passed.append(pred_answer == actual_answer)\n correct_probs.append(pred_prob if actual_answer else 1 - pred_prob)\n all_test_details.append({\n \"query\": test_case[0],\n \"pred_prob\": pred_prob,\n \"pred\": pred_answer,\n \"actual\": actual_answer,\n \"correct?\": pred_answer == actual_answer,\n \"correct_prob\": pred_prob if actual_answer else 1 - pred_prob,\n })\n try:\n aucroc = roc_auc_score([test_case[1] for test_case in self.test_cases], pred_probs)\n except:\n # only 1 class present....\n aucroc = 0\n print(\"====\")\n\n metrics_dict = {\n \"accuracy\": sum(tests_passed) / len(tests_passed),\n \"AUCROC\": aucroc,\n \"correct_prob\": sum(correct_probs) / len(correct_probs),\n }\n if start_metrics is None:\n start_metrics = {\n \"accuracy\": [metrics_dict[\"accuracy\"]],\n \"AUCROC\": [metrics_dict[\"AUCROC\"]],\n \"correct_prob\": [metrics_dict[\"correct_prob\"]],\n }\n metrics_dict[\"accuracy_relative\"] = metrics_dict[\"accuracy\"] - start_metrics[\"accuracy\"][0]\n metrics_dict[\"AUCROC_relative\"] = metrics_dict[\"AUCROC\"] - start_metrics[\"AUCROC\"][0]\n metrics_dict[\"correct_prob_relative\"] = metrics_dict[\"correct_prob\"] - start_metrics[\"correct_prob\"][0]\n \n return metrics_dict, all_test_details\n\n def score_test_cases(self, start_metrics=None):\n \"\"\"\n Scores the test cases.\n\n Args:\n score_type (str): The type of scoring to use. Can be \"no_hypothesis\", \"hypothesis\", or \"select\".\n\n Returns:\n Tuple[Dict, List[Dict]]: A tuple of the following:\n Dict: scores (dict): A dictionary containing the accuracy and F1 score of the answers on the test cases.\n accuracy (float): The accuracy of the answers on the test cases.\n f1 (float): The F1 score of the answers on the test cases.\n List[Dict]: all_test_details (list): A list of dictionaries containing the details of each test case.\n \"\"\"\n return self.score_test_cases_direct(start_metrics=start_metrics)\n\n def generate_hypothesis_regex(self):\n \"\"\"\n Generates a hypothesis regex given a task description and the previous interaction history.\n\n Loops until a compileable regex is produced. Regexes that fail to compile are stored in broken_regexes and used to prompt the model for a regex that compiles.\n \n Returns:\n hypothesis_regex (str)\n \"\"\"\n broken_regexes = []\n\n # Loop until we get a regex that compiles.\n while True:\n hypothesis_messages = self.get_hypothesis_prompt(self.task_description, self.interaction_history, broken_regexes)\n hypothesis_regex_text, _ = query_api(hypothesis_messages, self.engine, self.openai_cache, self.openai_cache_file)\n hypothesis_regex_text = self.strip_hypothesis_regex(hypothesis_regex_text)\n print('Hypothesis regex (post-strip):', hypothesis_regex_text)\n try:\n hypothesis_regex = re.compile(hypothesis_regex_text)\n except re.error:\n broken_regexes.append(hypothesis_regex_text)\n print(\"Failed to compile hypothesis regex\")\n continue\n break\n \n return hypothesis_regex\n\n def strip_hypothesis_regex(self, hypothesis_regex_text):\n '''Strips the hypothesis regex of quotes.\n \n Args:\n hypothesis_regex_text (str): The hypothesis regex to strip.\n \n Returns:\n str: The stripped hypothesis regex.\n '''\n hypothesis_regex_text = hypothesis_regex_text.strip('\"').strip(\"'\").strip(\"`\")\n return hypothesis_regex_text\n\n @abstractmethod\n def get_hypothesis_prompt(self, interaction_history, broken_regexes=None):\n '''Creates prompt for the model which produces a hypothesis using the given active learning framework.\n \n Args:\n task_description (str): Description of the task\n interaction_history (list of tuples): List of (question, answer) tuples. The precise format of the questions / answers differs based on the type of active learning agent.\n broken_regexes (list of str): List of strings holding previous hypotheses that failed to compile.\n\n Returns:\n prompt (str): Prompt for the model to generate a new hypothesis\n '''\n pass\n \n @abstractmethod\n def generate_active_query(self):\n '''Generates an active query to ask the oracle.'''\n pass\n\n @abstractmethod\n def generate_oracle_response(self, query):\n '''Produces an oracle response to the active query, and adds (query, response) to self.interaction_history.'''\n pass\n \n def update_interaction_history(self, active_query, oracle_response):\n '''Updates self.interaction_history based on the active query and oracle response.'''\n self.interaction_history.append((active_query, oracle_response))\n\n def add_turn(self, query, response):\n '''Add (query, response) to self.interaction_history.'''\n self.interaction_history.append((query, response))\n \n def get_query_prompt(self):\n pass\n\n def get_oracle_prompt(self, question, question_type):\n answer_description = \"Answer the question in the shortest way with minimal additional explanation.\"\n oracle_prompt = textwrap.dedent('''\\\n {persona} {answer_description}\n {question}'''\n ).format(\n persona=self.persona,\n answer_description=answer_description,\n question=question\n )\n print(oracle_prompt)\n print(\"===\")\n return oracle_prompt\n\n def query_oracle_api(self, question, question_type):\n oracle_prompt = self.get_oracle_prompt(question, question_type)\n answer, _ = query_api([{\"role\": \"user\", \"content\": oracle_prompt}], self.engine, self.openai_cache, self.openai_cache_file, temperature=self.temperature)\n return answer\n\n def evaluate_condition(self, **kwargs):\n return True\n \n def get_interaction_features(self):\n \"\"\"\n Returns a dictionary of features for the current interaction trajectory.\n\n The features are:\n - interaction_time: total time spent interacting with the system (in minutes)\n - interaction_num_turns: number of turns in the interaction\n - interaction_total_char_length: total number of characters in the user's messages\n \"\"\"\n return {\n \"interaction_num_turns\": len(self.interaction_history),\n }"
},
{
"identifier": "query_api",
"path": "utils.py",
"snippet": "@retry(wait=wait_random_exponential(min=1, max=60))\ndef query_api(messages, engine, openai_cache=None, openai_cache_file=None, **kwargs):\n '''Queries the OpenAI API with the given messages.\n \n NOTE: This function mutates the messages list to add the new_message and the response from the API.\n \n Args:\n messages (list): A list of past messages to send to the API.\n openai_cache (dict, optional): The openai cache dict. Stores the API responses to avoid duplicate queries. Defaults to None.\n openai_cache_file (str, optional): The path to write the cache entries to. Defaults to None.\n \n Returns:\n str: The response from the API.\n '''\n messages_cache_key = json.dumps(messages)\n if openai_cache and messages_cache_key in openai_cache:\n response = openai_cache[messages_cache_key]\n else:\n if \"temperature\" not in kwargs:\n kwargs[\"temperature\"] = 0.0\n if engine == \"gpt-4\" or engine == \"gpt-3.5-turbo\":\n response = openai.ChatCompletion.create(\n model=engine,\n messages=messages,\n **kwargs\n )\n else:\n response = openai.Completion.create(\n engine=engine,\n prompt=messages[0],\n **kwargs\n )\n save_openai_cache({messages_cache_key: response}, openai_cache, openai_cache_file)\n if engine == \"gpt-4\" or engine == \"gpt-3.5-turbo\":\n response_text = response['choices'][0]['message']['content']\n messages.append({'role': 'assistant', 'content': response_text})\n else:\n response_text = response['choices'][0]['text']\n return response_text, response"
},
{
"identifier": "load_openai_cache",
"path": "utils.py",
"snippet": "def load_openai_cache(openai_cache_file):\n '''Loads the openai cache file into a dict.\n \n Args:\n openai_cache_file (str): The path to the openai cache file.\n \n Returns:\n dict: The openai cache dict.\n '''\n if not openai_cache_file:\n return None\n openai_cache = {}\n if os.path.exists(openai_cache_file):\n with open(openai_cache_file) as f:\n for line in f:\n openai_cache.update(json.loads(line))\n return openai_cache"
},
{
"identifier": "async_query_api",
"path": "utils.py",
"snippet": "def async_query_api(\n message_history_list,\n engine: str,\n openai_cache=None,\n openai_cache_file=None,\n **kwargs,\n):\n return asyncio.run(dispatch_openai_requests(message_history_list, engine, openai_cache, openai_cache_file, **kwargs))"
}
] | import textwrap
import numpy as np
import random
import json
from base_active_learning_agent import BaseActiveLearningAgent
from utils import query_api, load_openai_cache, async_query_api
from tqdm import tqdm
from sentence_transformers import SentenceTransformer
from sklearn.cluster import KMeans | 4,654 | super().__init__(target_specification_file, engine, openai_cache_file, **kwargs)
# either specified in `target_specification_file` or in args
if pool_data_path is not None:
self.pool_data_path = pool_data_path
if pool_al_sampling_type is not None:
self.pool_al_sampling_type = pool_al_sampling_type
self.pool_al_examples = self.load_pool_examples(self.pool_data_path)
self.previous_samples = []
if self.pool_al_sampling_type == "diversity":
self.num_clusters = pool_diversity_num_clusters
print("Loading sentence transformer model...")
model = SentenceTransformer('all-MiniLM-L6-v2')
print("Embedding pool examples...")
# embed everything
self.pool_al_examples_embeddings = model.encode(self.pool_al_examples)
kmeans = KMeans(n_clusters=self.num_clusters, random_state=0).fit(self.pool_al_examples_embeddings)
# get centroids of clusters
centroids = kmeans.cluster_centers_
# get closest example to each centroid
self.all_samples = []
# round robin
self.curr_centroid_idx = 0
for centroid_idx, centroid in enumerate(centroids):
# closest_example_idx = np.argmin(np.linalg.norm(self.pool_al_examples_embeddings - centroid, axis=1))
cluster_samples = np.where(kmeans.labels_ == centroid_idx)[0]
# sort by distance (smallest to largest)
cluster_samples = cluster_samples[np.argsort(np.linalg.norm(self.pool_al_examples_embeddings[cluster_samples] - centroid, axis=1))]
self.all_samples.append([self.pool_al_examples[pool_sample] for pool_sample in cluster_samples])
all_samples = []
for sample in self.all_samples: all_samples.extend(sample)
assert set(all_samples) == set(self.pool_al_examples)
if self.pool_al_sampling_type == "uncertainty_logits":
self.engine_selection = "text-davinci-003"
self.openai_cache_selection_file = f"{self.engine_selection}-cache.jsonl"
self.openai_cache_selection = load_openai_cache(self.openai_cache_selection_file)
def load_pool_examples(self, pool_fp):
# csv_reader = csv.DictReader(open(pool_fp, 'r'), delimiter='\t')
pool_examples = []
for row in open(pool_fp):
pool_examples.append(json.loads(row)["nl_desc"])
return pool_examples
def format_edge_cases(self, edge_cases):
return '\n'.join([f"{idx+1}. {edge_case[0]} -> {edge_case[1]}" for idx, edge_case in enumerate(edge_cases)])
def format_al_json_samples(self, edge_cases):
return json.dumps([{"sample": sample.strip()} for sample in edge_cases])
@staticmethod
def strip_edge_case(edge_case):
# Strip label
edge_case = edge_case.split(" -> ")[0]
# Strip beginning dashes
if edge_case.startswith("- "):
edge_case = edge_case[2:]
return edge_case
def get_hypothesis_prompt(self):
pass
def get_query_prompt(self):
return f"pool_{self.pool_al_sampling_type}"
def generate_active_query(self):
'''Generates the next active learning query.'''
if self.pool_al_sampling_type == "uncertainty_tokens":
sample = self.generate_active_query_uncertainty_tokens(batch_size=10)
elif self.pool_al_sampling_type == "uncertainty_logits":
sample = self.generate_active_query_uncertainty_logits()
elif self.pool_al_sampling_type == "diversity":
sample = self.generate_active_query_diversity()
elif self.pool_al_sampling_type == "random":
sample = self.generate_active_query_random()
else:
raise NotImplementedError
self.previous_samples.append(sample)
self.pool_al_examples.remove(sample)
print(sample)
print("===")
return self.example_edge_case_question_format.replace("[edge case]", sample)
def generate_active_query_diversity(self):
# make people go through a fixed number (k turns)
# if len(self.previous_samples) >= len(self.all_samples):
# return self.generate_active_query_random()
next_sample = self.all_samples[self.curr_centroid_idx].pop(0)
self.curr_centroid_idx = (self.curr_centroid_idx + 1) % self.num_clusters
return next_sample
def generate_active_query_random(self):
random_sample = random.choice(self.pool_al_examples)
return random_sample
def generate_active_query_uncertainty_tokens(self, batch_size):
'''Samples the most uncertain edge case for the oracle.'''
"""
TODO old code... remove
"""
most_uncertain_edge_case = None
max_uncertainty = 0
for possible_next_edge_case_idx in tqdm(range(0, len(self.pool_al_examples), batch_size)):
next_edge_cases = self.pool_al_examples[possible_next_edge_case_idx:possible_next_edge_case_idx+batch_size]
al_template = textwrap.dedent('''\
{pool_al_prompt}
{previous_examples}
{pool_al_prompt2}
{next_edge_cases}
Return a json list of the form [{{"sample": sample, "pred label": yes/no, "pred prob": probability of predicted label for the sample}}]. Please stick to this format and return nothing else.'''
).format(
pool_al_prompt=self.pool_al_prompt[0],
previous_examples=self.format_edge_cases([
[self.previous_samples[idx], item[1]] for idx, item in enumerate(self.interaction_history)
]),
pool_al_prompt2=self.pool_al_prompt[1],
next_edge_cases=self.format_al_json_samples(next_edge_cases),
)
|
IMPLEMENTATION = "system" #["Python regex", "system"]
class PoolBasedAgent(BaseActiveLearningAgent):
"""Active learning agent that generates edge cases to identify the target regex."""
def __init__(self, target_specification_file, engine, openai_cache_file=None, pool_data_path=None, pool_al_sampling_type=None, pool_diversity_num_clusters=None, **kwargs):
super().__init__(target_specification_file, engine, openai_cache_file, **kwargs)
# either specified in `target_specification_file` or in args
if pool_data_path is not None:
self.pool_data_path = pool_data_path
if pool_al_sampling_type is not None:
self.pool_al_sampling_type = pool_al_sampling_type
self.pool_al_examples = self.load_pool_examples(self.pool_data_path)
self.previous_samples = []
if self.pool_al_sampling_type == "diversity":
self.num_clusters = pool_diversity_num_clusters
print("Loading sentence transformer model...")
model = SentenceTransformer('all-MiniLM-L6-v2')
print("Embedding pool examples...")
# embed everything
self.pool_al_examples_embeddings = model.encode(self.pool_al_examples)
kmeans = KMeans(n_clusters=self.num_clusters, random_state=0).fit(self.pool_al_examples_embeddings)
# get centroids of clusters
centroids = kmeans.cluster_centers_
# get closest example to each centroid
self.all_samples = []
# round robin
self.curr_centroid_idx = 0
for centroid_idx, centroid in enumerate(centroids):
# closest_example_idx = np.argmin(np.linalg.norm(self.pool_al_examples_embeddings - centroid, axis=1))
cluster_samples = np.where(kmeans.labels_ == centroid_idx)[0]
# sort by distance (smallest to largest)
cluster_samples = cluster_samples[np.argsort(np.linalg.norm(self.pool_al_examples_embeddings[cluster_samples] - centroid, axis=1))]
self.all_samples.append([self.pool_al_examples[pool_sample] for pool_sample in cluster_samples])
all_samples = []
for sample in self.all_samples: all_samples.extend(sample)
assert set(all_samples) == set(self.pool_al_examples)
if self.pool_al_sampling_type == "uncertainty_logits":
self.engine_selection = "text-davinci-003"
self.openai_cache_selection_file = f"{self.engine_selection}-cache.jsonl"
self.openai_cache_selection = load_openai_cache(self.openai_cache_selection_file)
def load_pool_examples(self, pool_fp):
# csv_reader = csv.DictReader(open(pool_fp, 'r'), delimiter='\t')
pool_examples = []
for row in open(pool_fp):
pool_examples.append(json.loads(row)["nl_desc"])
return pool_examples
def format_edge_cases(self, edge_cases):
return '\n'.join([f"{idx+1}. {edge_case[0]} -> {edge_case[1]}" for idx, edge_case in enumerate(edge_cases)])
def format_al_json_samples(self, edge_cases):
return json.dumps([{"sample": sample.strip()} for sample in edge_cases])
@staticmethod
def strip_edge_case(edge_case):
# Strip label
edge_case = edge_case.split(" -> ")[0]
# Strip beginning dashes
if edge_case.startswith("- "):
edge_case = edge_case[2:]
return edge_case
def get_hypothesis_prompt(self):
pass
def get_query_prompt(self):
return f"pool_{self.pool_al_sampling_type}"
def generate_active_query(self):
'''Generates the next active learning query.'''
if self.pool_al_sampling_type == "uncertainty_tokens":
sample = self.generate_active_query_uncertainty_tokens(batch_size=10)
elif self.pool_al_sampling_type == "uncertainty_logits":
sample = self.generate_active_query_uncertainty_logits()
elif self.pool_al_sampling_type == "diversity":
sample = self.generate_active_query_diversity()
elif self.pool_al_sampling_type == "random":
sample = self.generate_active_query_random()
else:
raise NotImplementedError
self.previous_samples.append(sample)
self.pool_al_examples.remove(sample)
print(sample)
print("===")
return self.example_edge_case_question_format.replace("[edge case]", sample)
def generate_active_query_diversity(self):
# make people go through a fixed number (k turns)
# if len(self.previous_samples) >= len(self.all_samples):
# return self.generate_active_query_random()
next_sample = self.all_samples[self.curr_centroid_idx].pop(0)
self.curr_centroid_idx = (self.curr_centroid_idx + 1) % self.num_clusters
return next_sample
def generate_active_query_random(self):
random_sample = random.choice(self.pool_al_examples)
return random_sample
def generate_active_query_uncertainty_tokens(self, batch_size):
'''Samples the most uncertain edge case for the oracle.'''
"""
TODO old code... remove
"""
most_uncertain_edge_case = None
max_uncertainty = 0
for possible_next_edge_case_idx in tqdm(range(0, len(self.pool_al_examples), batch_size)):
next_edge_cases = self.pool_al_examples[possible_next_edge_case_idx:possible_next_edge_case_idx+batch_size]
al_template = textwrap.dedent('''\
{pool_al_prompt}
{previous_examples}
{pool_al_prompt2}
{next_edge_cases}
Return a json list of the form [{{"sample": sample, "pred label": yes/no, "pred prob": probability of predicted label for the sample}}]. Please stick to this format and return nothing else.'''
).format(
pool_al_prompt=self.pool_al_prompt[0],
previous_examples=self.format_edge_cases([
[self.previous_samples[idx], item[1]] for idx, item in enumerate(self.interaction_history)
]),
pool_al_prompt2=self.pool_al_prompt[1],
next_edge_cases=self.format_al_json_samples(next_edge_cases),
) | response, _ = query_api( | 1 | 2023-10-16 18:43:47+00:00 | 8k |
bcmi/libcom | libcom/shadow_generation/source/PostProcessModel.py | [
{
"identifier": "ControlLDM",
"path": "libcom/shadow_generation/source/cldm/cldm.py",
"snippet": "class ControlLDM(LatentDiffusion):\n\n def __init__(self, control_stage_config, control_key, only_mid_control, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.control_model = instantiate_from_config(control_stage_config)\n self.control_key = control_key\n self.shadow_mask = \"mask\"\n self.only_mid_control = only_mid_control\n self.control_scales = [1.0] * 13\n self.LGP = latent_guidance_predictor(output_chan=1, input_chan=2240, num_encodings=9)\n\n @torch.no_grad()\n def get_input(self, batch, k, bs=None, *args, **kwargs):\n x, c = super().get_input(batch, self.first_stage_key, *args, **kwargs)\n control = batch[self.control_key]\n if bs is not None:\n control = control[:bs]\n control = control.to(self.device)\n control = einops.rearrange(control, 'b h w c -> b c h w')\n control = control.to(memory_format=torch.contiguous_format).float()\n mask = batch[self.shadow_mask]\n # mask = None\n return x, dict(c_crossattn=[c], c_concat=[control]), mask\n\n def apply_model(self, x_noisy, t, cond, *args, **kwargs):\n assert isinstance(cond, dict)\n diffusion_model = self.model.diffusion_model\n\n cond_txt = torch.cat(cond['c_crossattn'], 1)\n\n if cond['c_concat'] is None:\n eps = diffusion_model(x=x_noisy, timesteps=t, context=cond_txt, control=None, only_mid_control=self.only_mid_control)\n else:\n control = self.control_model(x=x_noisy, hint=torch.cat(cond['c_concat'], 1), timesteps=t, context=cond_txt)\n control = [c * scale for c, scale in zip(control, self.control_scales)]\n eps = diffusion_model(x=x_noisy, timesteps=t, context=cond_txt, control=control, only_mid_control=self.only_mid_control)\n\n return eps\n '''\n def training_step(self, batch, batch_idx):\n self.LGP.train()\n x, c, mask= self.get_input(batch, self.first_stage_key)\n features, encoded_edge_maps, noise_levels = [], [], []\n save_hook = save_out_hook\n blocks = [0,1,2,3]\n self.feature_blocks = []\n batch_size = batch['mask'].shape[0]\n if batch_size != 2:\n return None\n comp_img = batch['hint'][:, :, :, :3].permute(0,3,1,2)\n\n for idx, block in enumerate(self.model.diffusion_model.down_blocks):\n if idx in blocks:\n h=block.register_forward_hook(save_hook)\n self.feature_blocks.append([block,h]) \n \n # for idx, block in enumerate(self.model.diffusion_model.up_blocks):\n # if idx in blocks:\n # h=block.register_forward_hook(save_hook)\n # self.feature_blocks.append([block,h]) \n\n loss_noise1, _ = self(x, c, mask)\n\n loss_noise2, _, pred_x0 = self(x, c, mask, train_mask_only=True)\n\n activations = []\n\n for block,h in self.feature_blocks:\n activations.append(block.activations)\n block.activations = None\n h.remove()\n\n features = resize_and_concatenate(activations, x)\n\n gt_mask = batch[\"gt_mask\"].unsqueeze(1)\n\n predicted_mask = self.LGP(features)\n predicted_mask = predicted_mask.view(batch_size,1,64,64)\n\n loss_mask = nn.functional.mse_loss(predicted_mask, gt_mask, reduction='none').mean()\n\n predicted_mask = nn.functional.interpolate(\n predicted_mask.detach(), size=(512,512), mode=\"bilinear\"\n )\n predicted_mask = torch.greater_equal(predicted_mask, 0.6).int()\n\n cv2.imwrite(\"./pred_mask.png\", np.array(predicted_mask[0].squeeze(0).to('cpu') * 255))\n\n # pred_img = self.decode_first_stage_with_grad(pred_x0) * predicted_mask + (1-predicted_mask) * comp_img\n\n # loss_img = nn.functional.mse_loss(pred_img, batch['jpg'].permute(0,3,1,2), reduction='none').mean()\n\n loss = loss_noise1 + loss_noise2 + loss_mask\n\n return loss\n '''\n\n @torch.no_grad()\n def get_unconditional_conditioning(self, N):\n return self.get_learned_conditioning([\"\"] * N)\n\n @torch.no_grad()\n def log_images(self, batch, N=16, n_row=2, sample=False, ddim_steps=50, ddim_eta=0.0, return_keys=None,\n quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,\n plot_diffusion_rows=False, unconditional_guidance_scale=9.0, unconditional_guidance_label=None,\n use_ema_scope=True, mode='ddim', input=None, add_noise_strength=1, \n **kwargs):\n use_ddim = ddim_steps is not None\n\n log = dict()\n z, c, _= self.get_input(batch, self.first_stage_key, bs=N)\n c_cat, c = c[\"c_concat\"][0][:N], c[\"c_crossattn\"][0][:N]\n N = min(z.shape[0], N)\n n_row = min(z.shape[0], n_row)\n log[\"reconstruction\"] = self.decode_first_stage(z)\n log[\"control\"] = c_cat * 2.0 - 1.0\n log[\"conditioning\"] = log_txt_as_img((512, 512), batch[self.cond_stage_key], size=16)\n\n if plot_diffusion_rows:\n # get diffusion row\n diffusion_row = list()\n z_start = z[:n_row]\n for t in range(self.num_timesteps):\n if t % self.log_every_t == 0 or t == self.num_timesteps - 1:\n t = repeat(torch.tensor([t]), '1 -> b', b=n_row)\n t = t.to(self.device).long()\n noise = torch.randn_like(z_start)\n z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)\n diffusion_row.append(self.decode_first_stage(z_noisy))\n\n diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W\n diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')\n diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')\n diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])\n log[\"diffusion_row\"] = diffusion_grid\n\n if sample:\n # get denoise row\n samples, z_denoise_row = self.sample_log(cond={\"c_concat\": [c_cat], \"c_crossattn\": [c]},\n batch_size=N, mode=mode,\n ddim_steps=ddim_steps, eta=ddim_eta,\n input=input, add_noise_strength=add_noise_strength)\n x_samples = self.decode_first_stage(samples)\n log[\"samples\"] = x_samples\n if plot_denoise_rows:\n denoise_grid = self._get_denoise_row_from_list(z_denoise_row)\n log[\"denoise_row\"] = denoise_grid\n\n if unconditional_guidance_scale > 1.0:\n uc_cross = self.get_unconditional_conditioning(N)\n uc_cat = c_cat # torch.zeros_like(c_cat)\n uc_full = {\"c_concat\": [uc_cat], \"c_crossattn\": [uc_cross]}\n samples_cfg, _ = self.sample_log(cond={\"c_concat\": [c_cat], \"c_crossattn\": [c]},\n batch_size=N, mode=mode,ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=uc_full,\n input=input, add_noise_strength=add_noise_strength\n )\n x_samples_cfg = self.decode_first_stage(samples_cfg)\n log[f\"samples_cfg_scale_{unconditional_guidance_scale:.2f}\"] = x_samples_cfg\n\n return log\n\n @torch.no_grad()\n def sample_log(self, cond, batch_size, mode, ddim_steps, input, add_noise_strength, **kwargs):\n ddim_sampler = DDIMSampler(self)\n pndm_sampler = PNDMSampler(self)\n b, c, h, w = cond[\"c_concat\"][0].shape\n shape = (self.channels, h // 8, w // 8)\n if mode == 'ddim':\n samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs)\n elif mode == 'pndm':\n samples, intermediates = pndm_sampler.sample(ddim_steps, batch_size, shape, cond, \n verbose=False, input=input,\n strength=add_noise_strength, **kwargs)\n return samples, intermediates\n\n def configure_optimizers(self):\n lr = self.learning_rate\n params = list(self.control_model.parameters())\n if not self.sd_locked:\n params += list(self.model.diffusion_model.output_blocks.parameters())\n params += list(self.model.diffusion_model.out.parameters())\n opt = torch.optim.AdamW(params, lr=lr)\n return opt"
},
{
"identifier": "create_model",
"path": "libcom/shadow_generation/source/cldm/model.py",
"snippet": "def create_model(config_path):\n config = OmegaConf.load(config_path) if isinstance(config_path, str) else config_path\n model = instantiate_from_config(config.model).cpu()\n return model"
},
{
"identifier": "load_state_dict",
"path": "libcom/shadow_generation/source/cldm/model.py",
"snippet": "def load_state_dict(ckpt_path, location='cpu'):\n _, extension = os.path.splitext(ckpt_path)\n if extension.lower() == \".safetensors\":\n import safetensors.torch\n state_dict = safetensors.torch.load_file(ckpt_path, device=location)\n else:\n state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location)))\n state_dict = get_state_dict(state_dict)\n return state_dict"
},
{
"identifier": "PostProcessLogger",
"path": "libcom/shadow_generation/source/cldm/logger.py",
"snippet": "class PostProcessLogger(Callback):\n def __init__(self, batch_frequency=2000, max_images=4, log_num=1, clamp=True, increase_log_steps=True,\n rescale=True, disabled=False, log_on_batch_idx=False, log_first_step=False,\n log_images_kwargs=None):\n super().__init__()\n self.rescale = rescale\n self.batch_freq = batch_frequency\n self.max_images = max_images\n self.log_num = log_num\n if not increase_log_steps:\n self.log_steps = [self.batch_freq]\n self.clamp = clamp\n self.disabled = disabled\n self.log_on_batch_idx = log_on_batch_idx\n self.log_images_kwargs = log_images_kwargs if log_images_kwargs else {}\n self.log_first_step = log_first_step\n\n @rank_zero_only\n def log_img(self, pl_module, batch, batch_idx):\n check_idx = batch_idx # if self.log_on_batch_idx else pl_module.global_step\n if (self.check_frequency(check_idx) and # batch_idx % self.batch_freq == 0\n hasattr(pl_module, \"get_log\")):\n log_info = pl_module.get_log(batch, batch_idx, self.log_num)\n img_size = 256\n width = len(log_info) * img_size\n height = img_size\n\n for i in range(self.log_num):\n x_offset = 0\n img_to_save = Image.new(\"RGB\", size = (width, height))\n draw = ImageDraw.Draw(img_to_save)\n font = ImageFont.truetype(\"/usr/share/fonts/truetype/dejavu/DejaVuMathTeXGyre.ttf\", size=40)\n for title, imgs in log_info.items():\n img = Image.fromarray(np.array(imgs[i], dtype=np.uint8))\n img_to_save.paste(img, (x_offset, 0))\n draw.text((x_offset ,0), title, fill=\"red\", font=font)\n x_offset += img_size\n filename = \"gs-{:06}_e-{:06}_b-{:06}_{}.png\".format(pl_module.global_step, pl_module.current_epoch, batch_idx, i)\n root = os.path.join(pl_module.logger.save_dir, \"ppp_log\")\n os.makedirs(root, exist_ok=True)\n save_path = os.path.join(root, filename)\n img_to_save.save(save_path)\n\n def check_frequency(self, check_idx):\n return check_idx % self.batch_freq == 0\n\n def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):\n if not self.disabled:\n self.log_img(pl_module, batch, batch_idx)"
},
{
"identifier": "ResBlock",
"path": "libcom/shadow_generation/source/ldm/modules/diffusionmodules/openaimodel.py",
"snippet": "def convert_module_to_f16(x):\ndef convert_module_to_f32(x):\n def __init__(\n self,\n spacial_dim: int,\n embed_dim: int,\n num_heads_channels: int,\n output_dim: int = None,\n ):\n def forward(self, x):\n def forward(self, x, emb):\n def forward(self, x, emb, context=None):\n def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):\n def forward(self, x):\n def __init__(self, channels, out_channels=None, ks=5):\n def forward(self,x):\n def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):\n def forward(self, x):\n def __init__(\n self,\n channels,\n emb_channels,\n dropout,\n out_channels=None,\n use_conv=False,\n use_scale_shift_norm=False,\n dims=2,\n use_checkpoint=False,\n up=False,\n down=False,\n ):\n def forward(self, x, emb):\n def _forward(self, x, emb):\n def __init__(\n self,\n channels,\n num_heads=1,\n num_head_channels=-1,\n use_checkpoint=False,\n use_new_attention_order=False,\n ):\n def forward(self, x):\n def _forward(self, x):\ndef count_flops_attn(model, _x, y):\n def __init__(self, n_heads):\n def forward(self, qkv):\n def count_flops(model, _x, y):\n def __init__(self, n_heads):\n def forward(self, qkv):\n def count_flops(model, _x, y):\n def __init__(\n self,\n image_size,\n in_channels,\n model_channels,\n out_channels,\n num_res_blocks,\n attention_resolutions,\n dropout=0,\n channel_mult=(1, 2, 4, 8),\n conv_resample=True,\n dims=2,\n num_classes=None,\n use_checkpoint=False,\n use_fp16=False,\n num_heads=-1,\n num_head_channels=-1,\n num_heads_upsample=-1,\n use_scale_shift_norm=False,\n resblock_updown=False,\n use_new_attention_order=False,\n use_spatial_transformer=False, # custom transformer support\n transformer_depth=1, # custom transformer support\n context_dim=None, # custom transformer support\n n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model\n legacy=True,\n disable_self_attentions=None,\n num_attention_blocks=None,\n disable_middle_self_attn=False,\n use_linear_in_transformer=False,\n ):\n def convert_to_fp16(self):\n def convert_to_fp32(self):\n def forward(self, x, timesteps=None, context=None, y=None,**kwargs):\nclass AttentionPool2d(nn.Module):\nclass TimestepBlock(nn.Module):\nclass TimestepEmbedSequential(nn.Sequential, TimestepBlock):\nclass Upsample(nn.Module):\nclass TransposedUpsample(nn.Module):\nclass Downsample(nn.Module):\nclass ResBlock(TimestepBlock):\nclass AttentionBlock(nn.Module):\nclass QKVAttentionLegacy(nn.Module):\nclass QKVAttention(nn.Module):\nclass UNetModel(nn.Module):"
},
{
"identifier": "checkpoint",
"path": "libcom/shadow_generation/source/ldm/modules/diffusionmodules/util.py",
"snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n :param func: the function to evaluate.\n :param inputs: the argument sequence to pass to `func`.\n :param params: a sequence of parameters `func` depends on but does not\n explicitly take as arguments.\n :param flag: if False, disable gradient checkpointing.\n \"\"\"\n if flag:\n args = tuple(inputs) + tuple(params)\n return CheckpointFunction.apply(func, len(inputs), *args)\n else:\n return func(*inputs)"
},
{
"identifier": "conv_nd",
"path": "libcom/shadow_generation/source/ldm/modules/diffusionmodules/util.py",
"snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")"
},
{
"identifier": "linear",
"path": "libcom/shadow_generation/source/ldm/modules/diffusionmodules/util.py",
"snippet": "def linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)"
},
{
"identifier": "avg_pool_nd",
"path": "libcom/shadow_generation/source/ldm/modules/diffusionmodules/util.py",
"snippet": "def avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1d(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2d(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")"
},
{
"identifier": "zero_module",
"path": "libcom/shadow_generation/source/ldm/modules/diffusionmodules/util.py",
"snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module"
},
{
"identifier": "normalization",
"path": "libcom/shadow_generation/source/ldm/modules/diffusionmodules/util.py",
"snippet": "def normalization(channels):\n \"\"\"\n Make a standard normalization layer.\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNorm32(32, channels)"
},
{
"identifier": "timestep_embedding",
"path": "libcom/shadow_generation/source/ldm/modules/diffusionmodules/util.py",
"snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding"
},
{
"identifier": "exists",
"path": "libcom/shadow_generation/source/ldm/util.py",
"snippet": "def exists(x):\n return x is not None"
}
] | from torch import nn
from .cldm.cldm import ControlLDM
from .cldm.model import create_model, load_state_dict
from torch.utils.data import DataLoader
from .cldm.logger import PostProcessLogger
from PIL import Image
from libcom.shadow_generation.source.ldm.modules.diffusionmodules.openaimodel import (ResBlock, TimestepEmbedSequential, AttentionBlock,
Upsample, SpatialTransformer, Downsample)
from libcom.shadow_generation.source.ldm.modules.diffusionmodules.util import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
)
from libcom.shadow_generation.source.ldm.util import exists
import torch
import pytorch_lightning as pl
import os
import cv2
import numpy as np | 5,841 | # from share import *
class Post_Process_Net(nn.Module):
def __init__(
self,
image_size,
in_channels,
model_channels,
out_channels,
num_res_blocks,
attention_resolutions,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
num_classes=None,
use_checkpoint=False,
use_fp16=False,
num_heads=-1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
resblock_updown=False,
use_new_attention_order=False,
use_spatial_transformer=False, # custom transformer support
transformer_depth=1, # custom transformer support
context_dim=None, # custom transformer support
n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model
legacy=True,
disable_self_attentions=None,
num_attention_blocks=None,
disable_middle_self_attn=False,
use_linear_in_transformer=False,
):
super().__init__()
self.image_size = image_size
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
if isinstance(num_res_blocks, int):
self.num_res_blocks = len(channel_mult) * [num_res_blocks]
else:
if len(num_res_blocks) != len(channel_mult):
raise ValueError("provide num_res_blocks either as an int (globally constant) or "
"as a list/tuple (per-level) with the same length as channel_mult")
self.num_res_blocks = num_res_blocks
if disable_self_attentions is not None:
# should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
assert len(disable_self_attentions) == len(channel_mult)
if num_attention_blocks is not None:
assert len(num_attention_blocks) == len(self.num_res_blocks)
assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))
print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
f"attention will still not be set.")
self.attention_resolutions = attention_resolutions
self.dropout = dropout
self.channel_mult = channel_mult
self.conv_resample = conv_resample
self.num_classes = num_classes
self.use_checkpoint = use_checkpoint
self.dtype = torch.float16 if use_fp16 else torch.float32
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
self.predict_codebook_ids = n_embed is not None
time_embed_dim = model_channels * 4
self.time_embed = nn.Sequential(
linear(model_channels, time_embed_dim),
nn.SiLU(),
linear(time_embed_dim, time_embed_dim),
)
if self.num_classes is not None:
if isinstance(self.num_classes, int):
self.label_emb = nn.Embedding(num_classes, time_embed_dim)
elif self.num_classes == "continuous":
print("setting up linear c_adm embedding layer")
self.label_emb = nn.Linear(1, time_embed_dim)
else:
raise ValueError()
self.input_blocks = nn.ModuleList(
[
| # from share import *
class Post_Process_Net(nn.Module):
def __init__(
self,
image_size,
in_channels,
model_channels,
out_channels,
num_res_blocks,
attention_resolutions,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
num_classes=None,
use_checkpoint=False,
use_fp16=False,
num_heads=-1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
resblock_updown=False,
use_new_attention_order=False,
use_spatial_transformer=False, # custom transformer support
transformer_depth=1, # custom transformer support
context_dim=None, # custom transformer support
n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model
legacy=True,
disable_self_attentions=None,
num_attention_blocks=None,
disable_middle_self_attn=False,
use_linear_in_transformer=False,
):
super().__init__()
self.image_size = image_size
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
if isinstance(num_res_blocks, int):
self.num_res_blocks = len(channel_mult) * [num_res_blocks]
else:
if len(num_res_blocks) != len(channel_mult):
raise ValueError("provide num_res_blocks either as an int (globally constant) or "
"as a list/tuple (per-level) with the same length as channel_mult")
self.num_res_blocks = num_res_blocks
if disable_self_attentions is not None:
# should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
assert len(disable_self_attentions) == len(channel_mult)
if num_attention_blocks is not None:
assert len(num_attention_blocks) == len(self.num_res_blocks)
assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))
print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
f"attention will still not be set.")
self.attention_resolutions = attention_resolutions
self.dropout = dropout
self.channel_mult = channel_mult
self.conv_resample = conv_resample
self.num_classes = num_classes
self.use_checkpoint = use_checkpoint
self.dtype = torch.float16 if use_fp16 else torch.float32
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
self.predict_codebook_ids = n_embed is not None
time_embed_dim = model_channels * 4
self.time_embed = nn.Sequential(
linear(model_channels, time_embed_dim),
nn.SiLU(),
linear(time_embed_dim, time_embed_dim),
)
if self.num_classes is not None:
if isinstance(self.num_classes, int):
self.label_emb = nn.Embedding(num_classes, time_embed_dim)
elif self.num_classes == "continuous":
print("setting up linear c_adm embedding layer")
self.label_emb = nn.Linear(1, time_embed_dim)
else:
raise ValueError()
self.input_blocks = nn.ModuleList(
[ | TimestepEmbedSequential( | 4 | 2023-10-19 05:08:12+00:00 | 8k |
facebookresearch/motif | rl_baseline/sample-factory/sample_factory/algorithms/appo/learner.py | [
{
"identifier": "encoders_nle",
"path": "rl_baseline/encoders_nle.py",
"snippet": "NUM_CHARS = 256\nPAD_CHAR = 0\n H = math.floor((H + 2*P - D*(K-1) - 1)/S + 1)\n W = math.floor((W + 2*P - D*(K-1) - 1)/S + 1)\n K = self.k_dim # number of input filters\n F = 3 # filter dimensions\n S = 1 # stride\n P = 1 # padding\n M = 16 # number of intermediate filters\n Y = 8 # number of output filters\n L = cfg.encoder_num_layers # number of convnet layers\ndef calc_conv_output_size(H, W, P, D, K, S, n_layers=2):\n def __init__(self, height, width, height_target, width_target):\n def forward(self, inputs, coordinates):\n def forward(self, input):\ndef _step_to_range(delta, num_steps):\n def __init__(self, cfg, obs_space, timing):\n def forward(self, obs_dict):\n def __init__(self, cfg, obs_space, timing):\n def interleave(xs, ys):\n def _select(self, embed, x):\n def forward(self, obs_dict):\nclass Crop(nn.Module):\nclass Flatten(nn.Module):\nclass NLEMainEncoder(EncoderBase):\nclass TorchBeastEncoder(EncoderBase):"
},
{
"identifier": "create_reward_model",
"path": "rlaif/reward_model.py",
"snippet": "def create_reward_model(cfg, obs_space, action_space, seq_len=1, timing=None):\n if timing is None:\n timing = Timing()\n\n def make_encoder():\n return create_encoder(cfg, obs_space, timing, cfg.reward_encoder)\n\n def make_core(encoder):\n return create_core(cfg, encoder.get_encoder_out_size(), False)\n\n if cfg.actor_critic_share_weights:\n return RewardSharedWeights(make_encoder, make_core, seq_len, action_space, cfg, timing)\n else:\n raise NotImplementedError"
}
] | import csv
import glob
import os
import re
import shutil
import signal
import threading
import time
import numpy as np
import psutil
import torch
import torch.nn.functional as F
import gym
from collections import OrderedDict, deque
from os.path import join
from queue import Empty, Queue, Full
from threading import Thread
from typing import Tuple
from torch.nn.utils.rnn import PackedSequence, invert_permutation
from torch.multiprocessing import Process, Event as MultiprocessingEvent
from sample_factory.utils import Queue as MpQueue
from faster_fifo import Queue as MpQueue
from rl_baseline import encoders_nle
from rlaif.reward_model import create_reward_model
from sample_factory.algorithms.appo.appo_utils import TaskType, list_of_dicts_to_dict_of_lists, memory_stats, cuda_envvars_for_policy, \
TensorBatcher, iter_dicts_recursively, copy_dict_structure, ObjectPool
from sample_factory.algorithms.appo.model import create_actor_critic
from sample_factory.algorithms.appo.model_utils import create_encoder, normalize_obs
from sample_factory.algorithms.appo.aux_losses import CPCA
from sample_factory.algorithms.appo.population_based_training import PbtTask
from sample_factory.algorithms.utils.action_distributions import get_action_distribution, is_continuous_action_space
from sample_factory.algorithms.utils.algo_utils import calculate_gae, EPS
from sample_factory.algorithms.utils.pytorch_utils import to_scalar
from sample_factory.utils.decay import LinearDecay
from sample_factory.utils.timing import Timing
from sample_factory.utils.utils import log, AttrDict, experiment_dir, ensure_dir_exists, join_or_kill, safe_get, safe_put | 3,846 | if self.aux_loss_module is not None:
stats.aux_loss = var.aux_loss
stats.adv_min = var.adv.min()
stats.adv_max = var.adv.max()
stats.adv_std = var.adv_std
stats.max_abs_logprob = torch.abs(var.mb.action_logits).max()
if hasattr(var.action_distribution, 'summaries'):
stats.update(var.action_distribution.summaries())
if var.epoch == self.cfg.ppo_epochs - 1 and var.batch_num == len(var.minibatches) - 1:
# we collect these stats only for the last PPO batch, or every time if we're only doing one batch, IMPALA-style
ratio_mean = torch.abs(1.0 - var.ratio).mean().detach()
ratio_min = var.ratio.min().detach()
ratio_max = var.ratio.max().detach()
# log.debug('Learner %d ratio mean min max %.4f %.4f %.4f', self.policy_id, ratio_mean.cpu().item(), ratio_min.cpu().item(), ratio_max.cpu().item())
value_delta = torch.abs(var.values - var.old_values)
value_delta_avg, value_delta_max = value_delta.mean(), value_delta.max()
# calculate KL-divergence with the behaviour policy action distribution
old_action_distribution = get_action_distribution(
self.actor_critic.action_space, var.mb.action_logits,
)
kl_old = var.action_distribution.kl_divergence(old_action_distribution)
kl_old_mean = kl_old.mean()
stats.kl_divergence = kl_old_mean
stats.value_delta = value_delta_avg
stats.value_delta_max = value_delta_max
stats.fraction_clipped = ((var.ratio < var.clip_ratio_low).float() + (var.ratio > var.clip_ratio_high).float()).mean()
stats.ratio_mean = ratio_mean
stats.ratio_min = ratio_min
stats.ratio_max = ratio_max
stats.num_sgd_steps = var.num_sgd_steps
# this caused numerical issues on some versions of PyTorch with second moment reaching infinity
adam_max_second_moment = 0.0
for key, tensor_state in self.optimizer.state.items():
adam_max_second_moment = max(tensor_state['exp_avg_sq'].max().item(), adam_max_second_moment)
stats.adam_max_second_moment = adam_max_second_moment
version_diff = (var.curr_policy_version - var.mb.policy_version)[var.mb.policy_id == self.policy_id]
stats.version_diff_avg = version_diff.mean()
stats.version_diff_min = version_diff.min()
stats.version_diff_max = version_diff.max()
for key, value in stats.items():
stats[key] = to_scalar(value)
return stats
def _update_pbt(self):
"""To be called from the training loop, same thread that updates the model!"""
with self.pbt_mutex:
if self.load_policy_id is not None:
assert self.cfg.with_pbt
log.debug('Learner %d loads policy from %d', self.policy_id, self.load_policy_id)
self.load_from_checkpoint(self.load_policy_id)
self.load_policy_id = None
if self.new_cfg is not None:
for key, value in self.new_cfg.items():
if self.cfg[key] != value:
log.debug('Learner %d replacing cfg parameter %r with new value %r', self.policy_id, key, value)
self.cfg[key] = value
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.cfg.learning_rate
param_group['betas'] = (self.cfg.adam_beta1, self.cfg.adam_beta2)
log.debug('Updated optimizer lr to value %.7f, betas: %r', param_group['lr'], param_group['betas'])
self.new_cfg = None
@staticmethod
def load_checkpoint(checkpoints, device, checkpoint_num=0):
if len(checkpoints) <= 0:
log.warning('No checkpoints found')
return None
else:
if checkpoint_num == 0:
checkpoints = sorted(checkpoints, key=lambda x: (int(re.search(r'_(\d+)\.pth', x).group(1)), x))
latest_checkpoint = checkpoints[-1]
else:
file_id = f"_{checkpoint_num}.pth"
filtered_list = [file_name for file_name in checkpoints if file_id in file_name]
assert len(filtered_list) > 0
latest_checkpoint = filtered_list[0]
# extra safety mechanism to recover from spurious filesystem errors
num_attempts = 3
for attempt in range(num_attempts):
try:
log.warning('Loading state from checkpoint %s...', latest_checkpoint)
checkpoint_dict = torch.load(latest_checkpoint, map_location=device)
return checkpoint_dict
except Exception:
log.exception(f'Could not load from checkpoint, attempt {attempt}')
def _load_state(self, checkpoint_dict, load_progress=True):
if load_progress:
self.train_step = checkpoint_dict['train_step']
self.env_steps = checkpoint_dict['env_steps']
self.actor_critic.load_state_dict(checkpoint_dict['model'])
self.optimizer.load_state_dict(checkpoint_dict['optimizer'])
if self.aux_loss_module is not None:
self.aux_loss_module.load_state_dict(checkpoint_dict['aux_loss_module'])
log.info('Loaded experiment state at training iteration %d, env step %d', self.train_step, self.env_steps)
def init_model(self, timing):
# Load the reward model
if self.cfg.llm_reward > 0.:
checkpoints = self.get_checkpoints(join(self.cfg.reward_dir, f'checkpoint_p0'))
assert len(checkpoints) > 0
checkpoint_dict = self.load_checkpoint(checkpoints, self.device, checkpoint_num=self.cfg.checkpoint_num)
# TODO: don't save/load actor and critic weights that are not used anyways. This would avoid importing gym here.
reward_action_space = checkpoint_dict['model']['action_parameterization.distribution_linear.bias'].shape[0]
| #Copyright (c) Meta Platforms, Inc. and affiliates.
torch.autograd.set_detect_anomaly(True)
if os.name == 'nt':
else:
# noinspection PyPep8Naming
def _build_pack_info_from_dones(dones: torch.Tensor, T: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Create the indexing info needed to make the PackedSequence based on the dones.
PackedSequences are PyTorch's way of supporting a single RNN forward
call where each input in the batch can have an arbitrary sequence length
They work as follows: Given the sequences [c], [x, y, z], [a, b],
we generate data [x, a, c, y, b, z] and batch_sizes [3, 2, 1]. The
data is a flattened out version of the input sequences (the ordering in
data is determined by sequence length). batch_sizes tells you that
for each index, how many sequences have a length of (index + 1) or greater.
This method will generate the new index ordering such that you can
construct the data for a PackedSequence from a (N*T, ...) tensor
via x.index_select(0, select_inds)
"""
num_samples = len(dones)
rollout_boundaries = dones.clone().detach()
rollout_boundaries[T - 1::T] = 1 # end of each rollout is the boundary
rollout_boundaries = rollout_boundaries.nonzero(as_tuple=False).squeeze(dim=1) + 1
first_len = rollout_boundaries[0].unsqueeze(0)
if len(rollout_boundaries) <= 1:
log.debug('Only one rollout boundary. This can happen if batch size is 1, probably not during the real training.')
rollout_lengths = first_len
else:
rollout_lengths = rollout_boundaries[1:] - rollout_boundaries[:-1]
rollout_lengths = torch.cat([first_len, rollout_lengths])
rollout_starts_orig = rollout_boundaries - rollout_lengths
# done=True for the last step in the episode, so done flags rolled 1 step to the right will indicate
# first frames in the episodes
is_new_episode = dones.clone().detach().view((-1, T))
is_new_episode = is_new_episode.roll(1, 1)
# roll() is cyclical, so done=True in the last position in the rollout will roll to 0th position
# we want to avoid it here. (note to self: is there a function that does two of these things at once?)
is_new_episode[:, 0] = 0
is_new_episode = is_new_episode.view((-1, ))
lengths, sorted_indices = torch.sort(rollout_lengths, descending=True)
# We will want these on the CPU for torch.unique_consecutive,
# so move now.
cpu_lengths = lengths.to(device='cpu', non_blocking=True)
# We need to keep the original unpermuted rollout_starts, because the permutation is later applied
# internally in the RNN implementation.
# From modules/rnn.py:
# Each batch of the hidden state should match the input sequence that
# the user believes he/she is passing in.
# hx = self.permute_hidden(hx, sorted_indices)
rollout_starts_sorted = rollout_starts_orig.index_select(0, sorted_indices)
select_inds = torch.empty(num_samples, device=dones.device, dtype=torch.int64)
max_length = int(cpu_lengths[0].item())
# batch_sizes is *always* on the CPU
batch_sizes = torch.empty((max_length,), device='cpu', dtype=torch.int64)
offset = 0
prev_len = 0
num_valid_for_length = lengths.size(0)
unique_lengths = torch.unique_consecutive(cpu_lengths)
# Iterate over all unique lengths in reverse as they sorted
# in decreasing order
for i in range(len(unique_lengths) - 1, -1, -1):
valids = lengths[0:num_valid_for_length] > prev_len
num_valid_for_length = int(valids.float().sum().item())
next_len = int(unique_lengths[i])
batch_sizes[prev_len:next_len] = num_valid_for_length
new_inds = (
rollout_starts_sorted[0:num_valid_for_length].view(1, num_valid_for_length)
+ torch.arange(prev_len, next_len, device=rollout_starts_sorted.device).view(next_len - prev_len, 1)
).view(-1)
# for a set of sequences [1, 2, 3], [4, 5], [6, 7], [8]
# these indices will be 1,4,6,8,2,5,7,3
# (all first steps in all trajectories, then all second steps, etc.)
select_inds[offset:offset + new_inds.numel()] = new_inds
offset += new_inds.numel()
prev_len = next_len
# Make sure we have an index for all elements
assert offset == num_samples
assert is_new_episode.shape[0] == num_samples
return rollout_starts_orig, is_new_episode, select_inds, batch_sizes, sorted_indices
def build_rnn_inputs(x, dones_cpu, rnn_states, T: int):
"""
Create a PackedSequence input for an RNN such that each
set of steps that are part of the same episode are all part of
a batch in the PackedSequence.
Use the returned select_inds and build_core_out_from_seq to invert this.
:param x: A (N*T, -1) tensor of the data to build the PackedSequence out of
:param dones_cpu: A (N*T) tensor where dones[i] == 1.0 indicates an episode is done, a CPU-bound tensor
:param rnn_states: A (N*T, -1) tensor of the rnn_hidden_states
:param T: The length of the rollout
:return: tuple(x_seq, rnn_states, select_inds)
WHERE
x_seq is the PackedSequence version of x to pass to the RNN
rnn_states are the corresponding rnn state, zeroed on the episode boundary
inverted_select_inds can be passed to build_core_out_from_seq so the RNN output can be retrieved
"""
rollout_starts, is_new_episode, select_inds, batch_sizes, sorted_indices = _build_pack_info_from_dones(dones_cpu, T)
inverted_select_inds = invert_permutation(select_inds)
def device(t):
return t.to(device=x.device)
select_inds = device(select_inds)
inverted_select_inds = device(inverted_select_inds)
sorted_indices = device(sorted_indices)
rollout_starts = device(rollout_starts)
is_new_episode = device(is_new_episode)
x_seq = PackedSequence(x.index_select(0, select_inds), batch_sizes, sorted_indices)
# We zero-out rnn states for timesteps at the beginning of the episode.
# rollout_starts are indices of all starts of sequences
# (which can be due to episode boundary or just boundary of a rollout)
# (1 - is_new_episode.view(-1, 1)).index_select(0, rollout_starts) gives us a zero for every beginning of
# the sequence that is actually also a start of a new episode, and by multiplying this RNN state by zero
# we ensure no information transfer across episode boundaries.
rnn_states = rnn_states.index_select(0, rollout_starts)
is_same_episode = (1 - is_new_episode.view(-1, 1)).index_select(0, rollout_starts)
rnn_states = rnn_states * is_same_episode
return x_seq, rnn_states, inverted_select_inds
def build_core_out_from_seq(x_seq: PackedSequence, inverted_select_inds):
return x_seq.data.index_select(0, inverted_select_inds)
class LearnerWorker:
def __init__(
self, worker_idx, policy_id, cfg, obs_space, action_space, report_queue, policy_worker_queues, shared_buffers,
policy_lock, resume_experience_collection_cv,
):
log.info('Initializing the learner %d for policy %d', worker_idx, policy_id)
self.worker_idx = worker_idx
self.policy_id = policy_id
self.cfg = cfg
# PBT-related stuff
self.should_save_model = True # set to true if we need to save the model to disk on the next training iteration
self.load_policy_id = None # non-None when we need to replace our parameters with another policy's parameters
self.pbt_mutex = None # deferred initialization
self.new_cfg = None # non-None when we need to update the learning hyperparameters
self.terminate = False
self.num_batches_processed = 0
self.obs_space = obs_space
self.action_space = action_space
self.shared_buffers = shared_buffers
# deferred initialization
self.rollout_tensors = None
self.policy_versions = None
self.stop_experience_collection = None
self.stop_experience_collection_num_msgs = self.resume_experience_collection_num_msgs = 0
self.device = None
self.actor_critic = None
self.aux_loss_module = None
self.optimizer = None
self.policy_lock = policy_lock
self.resume_experience_collection_cv = resume_experience_collection_cv
self.task_queue = MpQueue()
self.report_queue = report_queue
self.initialized_event = MultiprocessingEvent()
self.initialized_event.clear()
self.model_saved_event = MultiprocessingEvent()
self.model_saved_event.clear()
# queues corresponding to policy workers using the same policy
# we send weight updates via these queues
self.policy_worker_queues = policy_worker_queues
self.experience_buffer_queue = None # deferred initialization
self.tensor_batch_pool = self.tensor_batcher = None
self.with_training = True # set to False for debugging no-training regime
self.train_in_background = self.cfg.train_in_background_thread # set to False for debugging
self.training_thread = None
self.train_thread_initialized = None
self.is_training = False
self.train_step = self.env_steps = 0
self.cycle_count = 1
# decay rate at which summaries are collected
# save summaries every 20 seconds in the beginning, but decay to every 4 minutes in the limit, because we
# do not need frequent summaries for longer experiments
self.summary_rate_decay_seconds = LinearDecay([(0, 20), (100000, 120), (1000000, 240)])
self.last_summary_time = 0
self.last_saved_time = self.last_milestone_time = 0
self.discarded_experience_over_time = deque([], maxlen=30)
self.discarded_experience_timer = time.time()
self.num_discarded_rollouts = 0
self.process = Process(target=self._run, daemon=True)
if is_continuous_action_space(self.action_space) and self.cfg.exploration_loss == 'symmetric_kl':
raise NotImplementedError('KL-divergence exploration loss is not supported with '
'continuous action spaces. Use entropy exploration loss')
self.exploration_loss_func = None # deferred initialization
try:
reward_csv_file = f'{cfg.reward_dir}/reward_metrics/train_norm_quantiles.csv'
with open(reward_csv_file, 'r', newline='') as file:
csv_reader = csv.reader(file)
reward_quantile_info = list(csv_reader)
except:
raise FileNotFoundError('Reward quantiles file not found.')
if cfg.eps_threshold_quantile != '0.0':
quantile_index = np.where(np.array(reward_quantile_info[0]).astype(float) == cfg.eps_threshold_quantile)[0][0]
self.rew_eps_threshold = float(reward_quantile_info[-1][quantile_index])
else:
self.rew_eps_threshold = -1000
def start_process(self):
self.process.start()
def deferred_initialization(self):
self.rollout_tensors = self.shared_buffers.tensors
self.policy_versions = self.shared_buffers.policy_versions
self.stop_experience_collection = self.shared_buffers.stop_experience_collection
self.pbt_mutex = threading.Lock()
self.experience_buffer_queue = Queue()
self.tensor_batch_pool = ObjectPool()
self.tensor_batcher = TensorBatcher(self.tensor_batch_pool)
self.training_thread = Thread(target=self._train_loop) if self.train_in_background else None
self.train_thread_initialized = threading.Event()
if self.cfg.exploration_loss_coeff == 0.0:
self.exploration_loss_func = lambda action_distr, valids: 0.0
elif self.cfg.exploration_loss == 'entropy':
self.exploration_loss_func = self.entropy_exploration_loss
elif self.cfg.exploration_loss == 'symmetric_kl':
self.exploration_loss_func = self.symmetric_kl_exploration_loss
else:
raise NotImplementedError(f'{self.cfg.exploration_loss} not supported!')
def _init(self):
log.info('Waiting for the learner to initialize...')
self.train_thread_initialized.wait()
log.info('Learner %d initialized', self.worker_idx)
self.initialized_event.set()
def _terminate(self):
self.terminate = True
def _broadcast_model_weights(self):
state_dict = self.actor_critic.state_dict()
policy_version = self.train_step
log.debug('Broadcast model weights for model version %d', policy_version)
model_state = (policy_version, state_dict)
for q in self.policy_worker_queues:
q.put((TaskType.INIT_MODEL, model_state))
def _calculate_gae(self, buffer):
"""
Calculate advantages using Generalized Advantage Estimation.
This is leftover the from previous version of the algorithm.
Perhaps should be re-implemented in PyTorch tensors, similar to V-trace for uniformity.
"""
rewards = np.stack(buffer.rewards).squeeze() # [E, T]
dones = np.stack(buffer.dones).squeeze() # [E, T]
values_arr = np.stack(buffer.values).squeeze() # [E, T]
# calculating fake values for the last step in the rollout
# this will make sure that advantage of the very last action is always zero
values = []
for i in range(len(values_arr)):
last_value, last_reward = values_arr[i][-1], rewards[i, -1]
next_value = (last_value - last_reward) / self.cfg.gamma
values.append(list(values_arr[i]))
values[i].append(float(next_value)) # [T] -> [T+1]
# calculating returns and GAE
rewards = rewards.transpose((1, 0)) # [E, T] -> [T, E]
dones = dones.transpose((1, 0)) # [E, T] -> [T, E]
values = np.asarray(values).transpose((1, 0)) # [E, T+1] -> [T+1, E]
advantages, returns = calculate_gae(rewards, dones, values, self.cfg.gamma, self.cfg.gae_lambda)
# transpose tensors back to [E, T] before creating a single experience buffer
buffer.advantages = advantages.transpose((1, 0)) # [T, E] -> [E, T]
buffer.returns = returns.transpose((1, 0)) # [T, E] -> [E, T]
buffer.returns = buffer.returns[:, :, np.newaxis] # [E, T] -> [E, T, 1]
buffer.advantages = [torch.tensor(buffer.advantages).reshape(-1)]
buffer.returns = [torch.tensor(buffer.returns).reshape(-1)]
return buffer
def _prepare_train_buffer(self, rollouts, macro_batch_size, timing):
trajectories = [AttrDict(r['t']) for r in rollouts]
with timing.add_time('buffers'):
buffer = AttrDict()
# by the end of this loop the buffer is a dictionary containing lists of numpy arrays
for i, t in enumerate(trajectories):
for key, x in t.items():
if key not in buffer:
buffer[key] = []
buffer[key].append(x)
# convert lists of dict observations to a single dictionary of lists
for key, x in buffer.items():
if isinstance(x[0], (dict, OrderedDict)):
buffer[key] = list_of_dicts_to_dict_of_lists(x)
if not self.cfg.with_vtrace:
with timing.add_time('calc_gae'):
buffer = self._calculate_gae(buffer)
with timing.add_time('batching'):
# concatenate rollouts from different workers into a single batch efficiently
# that is, if we already have memory for the buffers allocated, we can just copy the data into
# existing cached tensors instead of creating new ones. This is a performance optimization.
use_pinned_memory = self.cfg.device == 'gpu'
buffer = self.tensor_batcher.cat(buffer, macro_batch_size, use_pinned_memory, timing)
with timing.add_time('buff_ready'):
self.shared_buffers.free_trajectory_buffers([r.traj_buffer_idx for r in rollouts])
with timing.add_time('tensors_gpu_float'):
device_buffer = self._copy_train_data_to_device(buffer)
with timing.add_time('squeeze'):
# will squeeze actions only in simple categorical case
tensors_to_squeeze = [
'actions', 'log_prob_actions', 'policy_version', 'policy_id', 'values',
'rewards', 'dones', 'rewards_cpu', 'dones_cpu',
]
for tensor_name in tensors_to_squeeze:
device_buffer[tensor_name].squeeze_()
# we no longer need the cached buffer, and can put it back into the pool
self.tensor_batch_pool.put(buffer)
return device_buffer
def _macro_batch_size(self, batch_size):
return self.cfg.num_batches_per_iteration * batch_size
def _process_macro_batch(self, rollouts, batch_size, timing):
macro_batch_size = self._macro_batch_size(batch_size)
assert macro_batch_size % self.cfg.rollout == 0
assert self.cfg.rollout % self.cfg.recurrence == 0
assert macro_batch_size % self.cfg.recurrence == 0
samples = env_steps = 0
for rollout in rollouts:
samples += rollout['length']
env_steps += rollout['env_steps']
with timing.add_time('prepare'):
buffer = self._prepare_train_buffer(rollouts, macro_batch_size, timing)
self.experience_buffer_queue.put((buffer, batch_size, samples, env_steps))
if not self.cfg.benchmark and self.cfg.train_in_background_thread:
# in PyTorch 1.4.0 there is an intense memory spike when the very first batch is being processed
# we wait here until this is over so we can continue queueing more batches onto a GPU without having
# a risk to run out of GPU memory
while self.num_batches_processed < 1:
# log.debug('Waiting for the first batch to be processed')
time.sleep(0.5)
def _process_rollouts(self, rollouts, timing):
# batch_size can potentially change through PBT, so we should keep it the same and pass it around
# using function arguments, instead of using global self.cfg
batch_size = self.cfg.batch_size
rollouts_in_macro_batch = self._macro_batch_size(batch_size) // self.cfg.rollout
if len(rollouts) < rollouts_in_macro_batch:
return rollouts
to_discard = 0
to_process = []
policy_version = self.train_step
for r in rollouts:
mask = r.t['policy_id'] == self.policy_id
if np.any(mask):
rollout_newest_version = r.t['policy_version'][mask].max().item()
else:
log.error(
'Learner %d got a rollout without any transitions produced by policy %d. This must be a bug.',
self.policy_id, self.policy_id,
)
log.error('Rollout policy ids: %r', r.t['policy_id'])
rollout_newest_version = policy_version - self.cfg.max_policy_lag
if policy_version - rollout_newest_version >= self.cfg.max_policy_lag:
# the entire rollout is too old, discard it!
to_discard += 1
self.shared_buffers.free_trajectory_buffers([r.traj_buffer_idx])
else:
# There is some experience in the rollout that we can learn from.
# Old experience (older than max policy lag), experience from other policies (in case of policy
# change on episode boundary), and experience from inactive agents (policy id = -1) will be masked
# out during loss calculations.
to_process.append(r)
if to_discard > 0:
log.warning(
'Discarding %d old rollouts, cut by policy lag threshold %d (learner %d)',
to_discard, self.cfg.max_policy_lag, self.policy_id,
)
rollouts = to_process
self.num_discarded_rollouts += to_discard
if len(rollouts) >= rollouts_in_macro_batch:
# process newest rollouts
rollouts_to_process = rollouts[:rollouts_in_macro_batch]
rollouts = rollouts[rollouts_in_macro_batch:]
self._process_macro_batch(rollouts_to_process, batch_size, timing)
# log.info('Unprocessed rollouts: %d (%d samples)', len(rollouts), len(rollouts) * self.cfg.rollout)
return rollouts
def _get_minibatches(self, batch_size, experience_size):
"""Generating minibatches for training."""
assert self.cfg.rollout % self.cfg.recurrence == 0
assert experience_size % batch_size == 0, f'experience size: {experience_size}, batch size: {batch_size}'
if self.cfg.num_batches_per_iteration == 1:
return [None] # single minibatch is actually the entire buffer, we don't need indices
# indices that will start the mini-trajectories from the same episode (for bptt)
indices = np.arange(0, experience_size, self.cfg.recurrence)
indices = np.random.permutation(indices)
# complete indices of mini trajectories, e.g. with recurrence==4: [4, 16] -> [4, 5, 6, 7, 16, 17, 18, 19]
indices = [np.arange(i, i + self.cfg.recurrence) for i in indices]
indices = np.concatenate(indices)
assert len(indices) == experience_size
num_minibatches = experience_size // batch_size
minibatches = np.split(indices, num_minibatches)
return minibatches
@staticmethod
def _get_minibatch(buffer, indices):
if indices is None:
# handle the case of a single batch, where the entire buffer is a minibatch
return buffer
mb = AttrDict()
for item, x in buffer.items():
if isinstance(x, (dict, OrderedDict)):
mb[item] = AttrDict()
for key, x_elem in x.items():
mb[item][key] = x_elem[indices]
else:
mb[item] = x[indices]
return mb
def _should_save_summaries(self):
summaries_every_seconds = self.summary_rate_decay_seconds.at(self.train_step)
if time.time() - self.last_summary_time < summaries_every_seconds:
return False
return True
def _after_optimizer_step(self):
"""A hook to be called after each optimizer step."""
self.train_step += 1
self._maybe_save()
def _maybe_save(self):
if self.env_steps > self.cfg.save_every_steps * self.cycle_count:
# Keep a separate condition for updating cycle_counts
self._save()
self.model_saved_event.set()
self.cycle_count += 1
@staticmethod
def checkpoint_dir(cfg, policy_id):
checkpoint_dir = join(experiment_dir(cfg=cfg), f'checkpoint_p{policy_id}')
return ensure_dir_exists(checkpoint_dir)
@staticmethod
def get_checkpoints(checkpoints_dir):
checkpoints = glob.glob(join(checkpoints_dir, 'checkpoint_*'))
return sorted(checkpoints)
def _get_checkpoint_dict(self):
checkpoint = {
'train_step': self.train_step,
'env_steps': self.env_steps,
'model': self.actor_critic.state_dict(),
'optimizer': self.optimizer.state_dict(),
}
if self.aux_loss_module is not None:
checkpoint['aux_loss_module'] = self.aux_loss_module.state_dict()
return checkpoint
def _save(self):
checkpoint = self._get_checkpoint_dict()
assert checkpoint is not None
checkpoint_dir = self.checkpoint_dir(self.cfg, self.policy_id)
tmp_filepath = join(checkpoint_dir, 'temp_checkpoint.pth')
checkpoint_name = f'checkpoint_{self.train_step:09d}_{self.env_steps}.pth'
filepath = join(checkpoint_dir, checkpoint_name)
log.info('Saving %s...', tmp_filepath)
torch.save(checkpoint, tmp_filepath)
log.info('Renaming %s to %s', tmp_filepath, filepath)
os.rename(tmp_filepath, filepath)
while len(self.get_checkpoints(checkpoint_dir)) > self.cfg.keep_checkpoints:
oldest_checkpoint = self.get_checkpoints(checkpoint_dir)[0]
if os.path.isfile(oldest_checkpoint):
log.debug('Removing %s', oldest_checkpoint)
os.remove(oldest_checkpoint)
if self.cfg.save_milestones_sec > 0:
# milestones enabled
if time.time() - self.last_milestone_time >= self.cfg.save_milestones_sec:
milestones_dir = ensure_dir_exists(join(checkpoint_dir, 'milestones'))
milestone_path = join(milestones_dir, f'{checkpoint_name}.milestone')
log.debug('Saving a milestone %s', milestone_path)
shutil.copy(filepath, milestone_path)
self.last_milestone_time = time.time()
@staticmethod
def _policy_loss(ratio, adv, clip_ratio_low, clip_ratio_high, valids):
clipped_ratio = torch.clamp(ratio, clip_ratio_low, clip_ratio_high)
loss_unclipped = ratio * adv
loss_clipped = clipped_ratio * adv
loss = torch.min(loss_unclipped, loss_clipped)
loss = torch.masked_select(loss, valids)
loss = -loss.mean()
return loss
def _value_loss(self, new_values, old_values, target, clip_value, valids):
value_clipped = old_values + torch.clamp(new_values - old_values, -clip_value, clip_value)
value_original_loss = (new_values - target).pow(2)
value_clipped_loss = (value_clipped - target).pow(2)
value_loss = torch.max(value_original_loss, value_clipped_loss)
value_loss = torch.masked_select(value_loss, valids)
value_loss = value_loss.mean()
value_loss *= self.cfg.value_loss_coeff
return value_loss
def entropy_exploration_loss(self, action_distribution, valids):
entropy = action_distribution.entropy()
entropy = torch.masked_select(entropy, valids)
entropy_loss = -self.cfg.exploration_loss_coeff * entropy.mean()
return entropy_loss
def symmetric_kl_exploration_loss(self, action_distribution, valids):
kl_prior = action_distribution.symmetric_kl_with_uniform_prior()
kl_prior = torch.masked_select(kl_prior, valids).mean()
if not torch.isfinite(kl_prior):
kl_prior = torch.zeros(kl_prior.shape)
kl_prior = torch.clamp(kl_prior, max=30)
kl_prior_loss = self.cfg.exploration_loss_coeff * kl_prior
return kl_prior_loss
def _prepare_observations(self, obs_tensors, gpu_buffer_obs):
for d, gpu_d, k, v, _ in iter_dicts_recursively(obs_tensors, gpu_buffer_obs):
device, dtype = self.actor_critic.device_and_type_for_input_tensor(k)
tensor = v.detach().to(device, copy=True).type(dtype)
gpu_d[k] = tensor
def _copy_train_data_to_device(self, buffer):
device_buffer = copy_dict_structure(buffer)
for key, item in buffer.items():
if key == 'obs':
self._prepare_observations(item, device_buffer['obs'])
else:
device_tensor = item.detach().to(self.device, copy=True, non_blocking=True)
device_buffer[key] = device_tensor.float()
device_buffer['dones_cpu'] = buffer.dones.to('cpu', copy=True, non_blocking=True).float()
device_buffer['rewards_cpu'] = buffer.rewards.to('cpu', copy=True, non_blocking=True).float()
return device_buffer
def _train(self, gpu_buffer, batch_size, experience_size, timing):
with torch.no_grad():
policy_version_before_train = self.train_step
early_stopping_tolerance = 1e-6
early_stop = False
prev_epoch_actor_loss = 1e9
epoch_actor_losses = []
# V-trace parameters
# noinspection PyArgumentList
rho_hat = torch.Tensor([self.cfg.vtrace_rho])
# noinspection PyArgumentList
c_hat = torch.Tensor([self.cfg.vtrace_c])
clip_ratio_high = 1.0 + self.cfg.ppo_clip_ratio # e.g. 1.1
# this still works with e.g. clip_ratio = 2, while PPO's 1-r would give negative ratio
clip_ratio_low = 1.0 / clip_ratio_high
clip_value = self.cfg.ppo_clip_value
gamma = self.cfg.gamma
recurrence = self.cfg.recurrence
if self.cfg.with_vtrace:
assert recurrence == self.cfg.rollout and recurrence > 1, \
'V-trace requires to recurrence and rollout to be equal'
num_sgd_steps = 0
stats_and_summaries = None
if not self.with_training:
return stats_and_summaries
for epoch in range(self.cfg.ppo_epochs):
with timing.add_time('epoch_init'):
if early_stop or self.terminate:
break
summary_this_epoch = force_summaries = False
minibatches = self._get_minibatches(batch_size, experience_size)
for batch_num in range(len(minibatches)):
with timing.add_time('minibatch_init'):
indices = minibatches[batch_num]
# current minibatch consisting of short trajectory segments with length == recurrence
mb = self._get_minibatch(gpu_buffer, indices)
# calculate policy head outside of recurrent loop
with timing.add_time('forward_head'):
head_outputs = self.actor_critic.forward_head(mb.obs)
if self.cfg.llm_reward > 0.:
# Don't normalize 'obs' since normalization happens in-place when calling self.actor_critic.forward_head
r_head_outputs = self.reward_model.forward_head(mb.obs, normalize=False)
# initial rnn states
with timing.add_time('bptt_initial'):
if self.cfg.use_rnn:
head_output_seq, rnn_states, inverted_select_inds = build_rnn_inputs(
head_outputs, mb.dones_cpu, mb.rnn_states, recurrence,
)
else:
rnn_states = mb.rnn_states[::recurrence]
# calculate RNN outputs for each timestep in a loop
with timing.add_time('bptt'):
if self.cfg.use_rnn:
with timing.add_time('bptt_forward_core'):
core_output_seq, _ = self.actor_critic.forward_core(head_output_seq, rnn_states)
core_outputs = build_core_out_from_seq(core_output_seq, inverted_select_inds)
else:
core_outputs, _ = self.actor_critic.forward_core(head_outputs, rnn_states)
if self.cfg.llm_reward > 0.:
r_core_outputs, _ = self.reward_model.forward_core(r_head_outputs, torch.zeros_like(rnn_states))
num_trajectories = head_outputs.size(0) // recurrence
with timing.add_time('tail'):
assert core_outputs.shape[0] == head_outputs.shape[0]
# calculate policy tail outside of recurrent loop
result = self.actor_critic.forward_tail(core_outputs, with_action_distribution=True)
action_distribution = result.action_distribution
log_prob_actions = action_distribution.log_prob(mb.actions)
ratio = torch.exp(log_prob_actions - mb.log_prob_actions) # pi / pi_old
# super large/small values can cause numerical problems and are probably noise anyway
ratio = torch.clamp(ratio, 0.05, 20.0)
values = result.values.squeeze()
with torch.no_grad(): # these computations are not the part of the computation graph
# ignore experience from other agents (i.e. on episode boundary) and from inactive agents
valids = mb.policy_id == self.policy_id
# ignore experience that was older than the threshold even before training started
valids = valids & (policy_version_before_train - mb.policy_version < self.cfg.max_policy_lag)
if self.cfg.with_vtrace:
ratios_cpu = ratio.cpu()
values_cpu = values.cpu()
dones_cpu = mb.dones_cpu
if self.cfg.llm_reward > 0.:
llm_reward = self.reward_model.reward_fn(r_core_outputs)
llm_rewards_cpu = llm_reward.detach().cpu().squeeze()
if self.cfg.rew_norm:
llm_rewards_cpu = (llm_rewards_cpu - self.reward_model.mean) / (self.reward_model.var)**(1/2)
llm_rewards_cpu = (llm_rewards_cpu > self.rew_eps_threshold) * llm_rewards_cpu
msg_count = mb.obs['msg_count'].squeeze().cpu()
msg_count_coeff = 1 / (msg_count ** (self.cfg.beta_count_exponent))
else:
llm_rewards_cpu = torch.zeros_like(mb.rewards_cpu)
msg_count_coeff = 0.0
rewards_cpu = (self.cfg.extrinsic_reward * mb.rewards_cpu
+ self.cfg.llm_reward * llm_rewards_cpu * msg_count_coeff)
vtrace_rho = torch.min(rho_hat, ratios_cpu)
vtrace_c = torch.min(c_hat, ratios_cpu)
vs = torch.zeros((num_trajectories * recurrence))
adv = torch.zeros((num_trajectories * recurrence))
next_values = (values_cpu[recurrence - 1::recurrence] - rewards_cpu[recurrence - 1::recurrence]) / gamma
next_vs = next_values
with timing.add_time('vtrace'):
for i in reversed(range(self.cfg.recurrence)):
rewards = rewards_cpu[i::recurrence]
dones = dones_cpu[i::recurrence]
not_done = 1.0 - dones
not_done_times_gamma = not_done * gamma
curr_values = values_cpu[i::recurrence]
curr_vtrace_rho = vtrace_rho[i::recurrence]
curr_vtrace_c = vtrace_c[i::recurrence]
delta_s = curr_vtrace_rho * (rewards + not_done_times_gamma * next_values - curr_values)
adv[i::recurrence] = curr_vtrace_rho * (rewards + not_done_times_gamma * next_vs - curr_values)
next_vs = curr_values + delta_s + not_done_times_gamma * curr_vtrace_c * (next_vs - next_values)
vs[i::recurrence] = next_vs
next_values = curr_values
targets = vs
else:
raise NotImplementedError
adv_mean = adv.mean()
adv_std = adv.std()
adv = (adv - adv_mean) / max(1e-3, adv_std) # normalize advantage
adv = adv.to(self.device)
with timing.add_time('losses'):
policy_loss = self._policy_loss(ratio, adv, clip_ratio_low, clip_ratio_high, valids)
exploration_loss = self.exploration_loss_func(action_distribution, valids)
actor_loss = policy_loss + exploration_loss
epoch_actor_losses.append(actor_loss.item())
targets = targets.to(self.device)
old_values = mb.values
value_loss = self._value_loss(values, old_values, targets, clip_value, valids)
critic_loss = value_loss
loss = actor_loss + critic_loss
if self.aux_loss_module is not None:
with timing.add_time('aux_loss'):
aux_loss = self.aux_loss_module(
mb.actions.view(num_trajectories, recurrence, -1),
(1.0 - mb.dones).view(num_trajectories, recurrence, 1),
valids.view(num_trajectories, recurrence, -1),
head_outputs.view(num_trajectories, recurrence, -1),
core_outputs.view(num_trajectories, recurrence, -1),
)
loss = loss + aux_loss
high_loss = 30.0
if abs(to_scalar(policy_loss)) > high_loss or abs(to_scalar(value_loss)) > high_loss or abs(to_scalar(exploration_loss)) > high_loss:
log.warning(
'High loss value: %.4f %.4f %.4f %.4f (recommended to adjust the --reward_scale parameter)',
to_scalar(loss), to_scalar(policy_loss), to_scalar(value_loss), to_scalar(exploration_loss),
)
force_summaries = True
# update the weights
with timing.add_time('update'):
# following advice from https://youtu.be/9mS1fIYj1So set grad to None instead of optimizer.zero_grad()
for p in self.actor_critic.parameters():
p.grad = None
if self.aux_loss_module is not None:
for p in self.aux_loss_module.parameters():
p.grad = None
loss.backward()
if self.cfg.max_grad_norm > 0.0:
with timing.add_time('clip'):
torch.nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.cfg.max_grad_norm)
if self.aux_loss_module is not None:
torch.nn.utils.clip_grad_norm_(self.aux_loss_module.parameters(), self.cfg.max_grad_norm)
curr_policy_version = self.train_step # policy version before the weight update
with self.policy_lock:
self.optimizer.step()
num_sgd_steps += 1
with torch.no_grad():
with timing.add_time('after_optimizer'):
self._after_optimizer_step()
# collect and report summaries
with_summaries = self._should_save_summaries() or force_summaries
if with_summaries and not summary_this_epoch:
stats_and_summaries = self._record_summaries(AttrDict(locals()))
summary_this_epoch = True
force_summaries = False
# end of an epoch
# this will force policy update on the inference worker (policy worker)
self.policy_versions[self.policy_id] = self.train_step
new_epoch_actor_loss = np.mean(epoch_actor_losses)
loss_delta_abs = abs(prev_epoch_actor_loss - new_epoch_actor_loss)
if loss_delta_abs < early_stopping_tolerance:
early_stop = True
log.debug(
'Early stopping after %d epochs (%d sgd steps), loss delta %.7f',
epoch + 1, num_sgd_steps, loss_delta_abs,
)
break
prev_epoch_actor_loss = new_epoch_actor_loss
epoch_actor_losses = []
return stats_and_summaries
def _record_summaries(self, train_loop_vars):
var = train_loop_vars
self.last_summary_time = time.time()
stats = AttrDict()
stats.valids_fraction = var.valids.float().mean()
stats.same_policy_fraction = (var.mb.policy_id == self.policy_id).float().mean()
grad_norm = sum(
p.grad.data.norm(2).item() ** 2
for p in self.actor_critic.parameters()
if p.grad is not None
) ** 0.5
stats.grad_norm = grad_norm
stats.loss = var.loss
stats.value = var.result.values.mean()
stats.entropy = var.action_distribution.entropy().mean()
stats.policy_loss = var.policy_loss
stats.value_loss = var.value_loss
stats.exploration_loss = var.exploration_loss
if self.aux_loss_module is not None:
stats.aux_loss = var.aux_loss
stats.adv_min = var.adv.min()
stats.adv_max = var.adv.max()
stats.adv_std = var.adv_std
stats.max_abs_logprob = torch.abs(var.mb.action_logits).max()
if hasattr(var.action_distribution, 'summaries'):
stats.update(var.action_distribution.summaries())
if var.epoch == self.cfg.ppo_epochs - 1 and var.batch_num == len(var.minibatches) - 1:
# we collect these stats only for the last PPO batch, or every time if we're only doing one batch, IMPALA-style
ratio_mean = torch.abs(1.0 - var.ratio).mean().detach()
ratio_min = var.ratio.min().detach()
ratio_max = var.ratio.max().detach()
# log.debug('Learner %d ratio mean min max %.4f %.4f %.4f', self.policy_id, ratio_mean.cpu().item(), ratio_min.cpu().item(), ratio_max.cpu().item())
value_delta = torch.abs(var.values - var.old_values)
value_delta_avg, value_delta_max = value_delta.mean(), value_delta.max()
# calculate KL-divergence with the behaviour policy action distribution
old_action_distribution = get_action_distribution(
self.actor_critic.action_space, var.mb.action_logits,
)
kl_old = var.action_distribution.kl_divergence(old_action_distribution)
kl_old_mean = kl_old.mean()
stats.kl_divergence = kl_old_mean
stats.value_delta = value_delta_avg
stats.value_delta_max = value_delta_max
stats.fraction_clipped = ((var.ratio < var.clip_ratio_low).float() + (var.ratio > var.clip_ratio_high).float()).mean()
stats.ratio_mean = ratio_mean
stats.ratio_min = ratio_min
stats.ratio_max = ratio_max
stats.num_sgd_steps = var.num_sgd_steps
# this caused numerical issues on some versions of PyTorch with second moment reaching infinity
adam_max_second_moment = 0.0
for key, tensor_state in self.optimizer.state.items():
adam_max_second_moment = max(tensor_state['exp_avg_sq'].max().item(), adam_max_second_moment)
stats.adam_max_second_moment = adam_max_second_moment
version_diff = (var.curr_policy_version - var.mb.policy_version)[var.mb.policy_id == self.policy_id]
stats.version_diff_avg = version_diff.mean()
stats.version_diff_min = version_diff.min()
stats.version_diff_max = version_diff.max()
for key, value in stats.items():
stats[key] = to_scalar(value)
return stats
def _update_pbt(self):
"""To be called from the training loop, same thread that updates the model!"""
with self.pbt_mutex:
if self.load_policy_id is not None:
assert self.cfg.with_pbt
log.debug('Learner %d loads policy from %d', self.policy_id, self.load_policy_id)
self.load_from_checkpoint(self.load_policy_id)
self.load_policy_id = None
if self.new_cfg is not None:
for key, value in self.new_cfg.items():
if self.cfg[key] != value:
log.debug('Learner %d replacing cfg parameter %r with new value %r', self.policy_id, key, value)
self.cfg[key] = value
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.cfg.learning_rate
param_group['betas'] = (self.cfg.adam_beta1, self.cfg.adam_beta2)
log.debug('Updated optimizer lr to value %.7f, betas: %r', param_group['lr'], param_group['betas'])
self.new_cfg = None
@staticmethod
def load_checkpoint(checkpoints, device, checkpoint_num=0):
if len(checkpoints) <= 0:
log.warning('No checkpoints found')
return None
else:
if checkpoint_num == 0:
checkpoints = sorted(checkpoints, key=lambda x: (int(re.search(r'_(\d+)\.pth', x).group(1)), x))
latest_checkpoint = checkpoints[-1]
else:
file_id = f"_{checkpoint_num}.pth"
filtered_list = [file_name for file_name in checkpoints if file_id in file_name]
assert len(filtered_list) > 0
latest_checkpoint = filtered_list[0]
# extra safety mechanism to recover from spurious filesystem errors
num_attempts = 3
for attempt in range(num_attempts):
try:
log.warning('Loading state from checkpoint %s...', latest_checkpoint)
checkpoint_dict = torch.load(latest_checkpoint, map_location=device)
return checkpoint_dict
except Exception:
log.exception(f'Could not load from checkpoint, attempt {attempt}')
def _load_state(self, checkpoint_dict, load_progress=True):
if load_progress:
self.train_step = checkpoint_dict['train_step']
self.env_steps = checkpoint_dict['env_steps']
self.actor_critic.load_state_dict(checkpoint_dict['model'])
self.optimizer.load_state_dict(checkpoint_dict['optimizer'])
if self.aux_loss_module is not None:
self.aux_loss_module.load_state_dict(checkpoint_dict['aux_loss_module'])
log.info('Loaded experiment state at training iteration %d, env step %d', self.train_step, self.env_steps)
def init_model(self, timing):
# Load the reward model
if self.cfg.llm_reward > 0.:
checkpoints = self.get_checkpoints(join(self.cfg.reward_dir, f'checkpoint_p0'))
assert len(checkpoints) > 0
checkpoint_dict = self.load_checkpoint(checkpoints, self.device, checkpoint_num=self.cfg.checkpoint_num)
# TODO: don't save/load actor and critic weights that are not used anyways. This would avoid importing gym here.
reward_action_space = checkpoint_dict['model']['action_parameterization.distribution_linear.bias'].shape[0] | self.reward_model = create_reward_model(self.cfg, self.obs_space, gym.spaces.Discrete(reward_action_space), timing=timing) | 1 | 2023-10-24 17:45:26+00:00 | 8k |
pgorecki/lato | examples/example3/lagom_integration.py | [
{
"identifier": "Application",
"path": "lato/application.py",
"snippet": "class Application(ApplicationModule):\n dependency_provider_class = SimpleDependencyProvider\n\n def __init__(self, name=__name__, dependency_provider=None, **kwargs):\n super().__init__(name)\n self.dependency_provider = (\n dependency_provider or self.dependency_provider_class(**kwargs)\n )\n self._transaction_context_factory = None\n self._on_enter_transaction_context = lambda ctx: None\n self._on_exit_transaction_context = lambda ctx, exception=None: None\n self._transaction_middlewares = []\n self._composers: dict[str | Task, Callable] = {}\n\n def get_dependency(self, identifier: Any) -> Any:\n \"\"\"Get a dependency from the dependency provider\"\"\"\n return self.dependency_provider.get_dependency(identifier)\n\n def __getitem__(self, item) -> Any:\n return self.get_dependency(item)\n\n def call(self, func: Callable | str, *args, **kwargs):\n if isinstance(func, str):\n try:\n func = next(self.iterate_handlers_for(alias=func))\n except StopIteration:\n raise ValueError(f\"Handler not found\", func)\n\n with self.transaction_context() as ctx:\n result = ctx.call(func, *args, **kwargs)\n return result\n\n def execute(self, task: Task) -> tuple[Any, ...]:\n with self.transaction_context() as ctx:\n results = ctx.execute(task)\n return results\n\n def query(self, task: Task) -> Any:\n results = self.execute(task)\n alias = task.__class__\n composer = self._composers.get(alias, compose)\n return composer(results)\n\n def emit(self, event: Event) -> dict[Callable, Any]:\n with self.transaction_context() as ctx:\n result = ctx.emit(event)\n return result\n\n def on_enter_transaction_context(self, func):\n \"\"\"\n Decorator for registering a function to be called when entering a transaction context\n\n :param func:\n :return:\n \"\"\"\n self._on_enter_transaction_context = func\n return func\n\n def on_exit_transaction_context(self, func):\n \"\"\"\n Decorator for registering a function to be called when exiting a transaction context\n\n :param func:\n :return:\n \"\"\"\n self._on_exit_transaction_context = func\n return func\n\n def on_create_transaction_context(self, func):\n \"\"\"\n Decorator for overrinding default transaction context creation\n\n :param func:\n :return:\n \"\"\"\n self._transaction_context_factory = func\n return func\n\n def transaction_middleware(self, middleware_func):\n \"\"\"\n Decorator for registering a middleware function to be called when executing a function in a transaction context\n :param middleware_func:\n :return:\n \"\"\"\n self._transaction_middlewares.insert(0, middleware_func)\n return middleware_func\n\n def compose(self, alias):\n \"\"\"\n Decorator for composing results of tasks\n \"\"\"\n\n def decorator(func):\n self._composers[alias] = func\n return func\n\n return decorator\n\n def transaction_context(self, **dependencies) -> TransactionContext:\n \"\"\"\n Creates a transaction context with the application dependencies\n\n :param dependencies:\n :return:\n \"\"\"\n if self._transaction_context_factory:\n ctx = self._transaction_context_factory(**dependencies)\n else:\n dp = self.dependency_provider.copy(**dependencies)\n ctx = TransactionContext(dependency_provider=dp)\n\n ctx.configure(\n on_enter_transaction_context=self._on_enter_transaction_context,\n on_exit_transaction_context=self._on_exit_transaction_context,\n middlewares=self._transaction_middlewares,\n handlers_iterator=self.iterate_handlers_for,\n )\n return ctx"
},
{
"identifier": "DependencyProvider",
"path": "lato/dependency_provider.py",
"snippet": "class DependencyProvider(ABC):\n allow_names = True\n allow_types = True\n \"\"\"\n A dependency provider that provides dependencies and helps in automatic\n dependency injection based on type or parameter name.\n \"\"\"\n\n @abstractmethod\n def has_dependency(self, identifier: str | type) -> bool:\n \"\"\"\n Check if a dependency with the given identifier exists.\n\n :param identifier: Identifier for the dependency\n :return: True if the dependency exists, otherwise False\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def register_dependency(self, identifier: str | type, dependency: Any):\n \"\"\"\n Register a dependency with a given identifier (name or type).\n\n :param identifier: The name or type to be used as an identifier for the dependency\n :param dependency: The actual dependency\n \"\"\"\n raise NotImplementedError()\n\n def get_dependency(self, identifier: str | type) -> Any:\n \"\"\"\n Retrieve a dependency using its identifier (name or type).\n\n :param identifier: Identifier for the dependency\n :return: The associated dependency\n \"\"\"\n raise NotImplementedError()\n\n @staticmethod\n def is_instance_of_custom_class(x):\n \"\"\"\n Check if x is an instance of a custom (user-defined) class.\n\n :param x: Object to check\n :return: True if x is an instance of a custom class, otherwise False\n \"\"\"\n return hasattr(x, \"__class__\")\n\n @abstractmethod\n def copy(self, *args, **kwargs) -> \"DependencyProvider\":\n \"\"\"\n Create a copy of self with updated dependencies.\n :param args: typed overrides\n :param kwargs: named overrides\n :return: A copy of the dependency provider\n \"\"\"\n\n def update(self, *args, **kwargs):\n \"\"\"\n Update the dependency provider with new dependencies.\n\n :param args: Class instances to be updated by types\n :param kwargs: Dependencies to be registered by types and with explicit names\n \"\"\"\n if self.allow_types:\n for value in args:\n t, v = self._get_type_and_value(value)\n self.register_dependency(t, v)\n\n for name, value in kwargs.items():\n t, v = self._get_type_and_value(value)\n if self.allow_names:\n self.register_dependency(name, v)\n if self.allow_types:\n self.register_dependency(t, v)\n\n def _get_type_and_value(self, value):\n if isinstance(value, TypedDependency):\n return value.a_type, value.value\n return type(value), value\n\n def _resolve_arguments(\n self, function_parameters: OrderedDict, overrides: dict[str, Any]\n ) -> dict[str, Any]:\n \"\"\"\n Resolve given function parameters to their corresponding dependencies.\n\n :param function_parameters: Parameters of the function\n :param overrides: Manual overrides for dependencies\n :return: A dictionary of resolved dependencies\n \"\"\"\n\n def _resolve(identifier, overrides):\n if identifier in overrides:\n return overrides[identifier]\n return self.get_dependency(identifier)\n\n kwargs = {}\n for param_name, param_type in function_parameters.items():\n # first, try to resolve by type\n if param_type == inspect.Parameter.empty:\n try:\n kwargs[param_name] = _resolve(param_type, overrides)\n continue\n except (ValueError, KeyError):\n pass\n # then, try to resolve by name\n try:\n kwargs[param_name] = _resolve(param_name, overrides)\n continue\n except (ValueError, KeyError):\n pass\n\n return kwargs\n\n def resolve_func_params(\n self,\n func: Callable,\n func_args: Any = None,\n func_kwargs: Any = None,\n ) -> dict[str, Any]:\n \"\"\"\n Resolve function parameters by providing necessary kwargs to call the function.\n\n :param func: The function to get arguments for\n :param func_args: Positional arguments to the function\n :param func_kwargs: Keyword arguments to the function\n :return: A dictionary of keyword arguments\n \"\"\"\n\n if func_args is None:\n func_args = []\n if func_kwargs is None:\n func_kwargs = {}\n\n func_parameters = get_function_parameters(func)\n resolved_kwargs = OrderedDict()\n arg_idx = 0\n for param_name, param_type in func_parameters.items():\n if arg_idx < len(func_args):\n resolved_kwargs[param_name] = func_args[arg_idx]\n arg_idx += 1\n continue\n\n if param_name in func_kwargs:\n resolved_kwargs[param_name] = func_kwargs[param_name]\n elif param_type != inspect.Parameter.empty and self.has_dependency(\n param_type\n ):\n resolved_kwargs[param_name] = self.get_dependency(param_type)\n elif self.has_dependency(param_name):\n resolved_kwargs[param_name] = self.get_dependency(param_name)\n\n return resolved_kwargs\n\n def __getitem__(self, key):\n return self.get_dependency(key)\n\n def __setitem__(self, key, value):\n self.register_dependency(key, value)"
},
{
"identifier": "TransactionContext",
"path": "lato/transaction_context.py",
"snippet": "class TransactionContext:\n \"\"\"A context spanning a single transaction for execution of a function\"\"\"\n\n dependency_provider_factory = SimpleDependencyProvider\n\n def __init__(\n self, dependency_provider: DependencyProvider | None = None, *args, **kwargs\n ):\n self.dependency_provider = (\n dependency_provider or self.dependency_provider_factory(*args, **kwargs)\n )\n self.resolved_kwargs: dict[str, Any] = {}\n self.current_action: tuple[str | Message, Any] | None = None\n self._on_enter_transaction_context = lambda ctx: None\n self._on_exit_transaction_context = lambda ctx, exception=None: None\n self._middlewares: list[Callable] = []\n self._handlers_iterator: Iterator = lambda alias: iter([])\n\n def configure(\n self,\n on_enter_transaction_context=None,\n on_exit_transaction_context=None,\n middlewares=None,\n handlers_iterator=None,\n ):\n if on_enter_transaction_context:\n self._on_enter_transaction_context = on_enter_transaction_context\n if on_exit_transaction_context:\n self._on_exit_transaction_context = on_exit_transaction_context\n if middlewares:\n self._middlewares = middlewares\n if handlers_iterator:\n self._handlers_iterator = handlers_iterator\n\n def begin(self):\n \"\"\"Should be used to start a transaction\"\"\"\n self._on_enter_transaction_context(self)\n\n def end(self, exception=None):\n \"\"\"Should be used to commit/end a transaction\"\"\"\n self._on_exit_transaction_context(self, exception)\n\n def iterate_handlers_for(self, alias: str):\n yield from self._handlers_iterator(alias)\n\n def __enter__(self):\n self.begin()\n return self\n\n def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):\n self.end(exc_val)\n\n def _wrap_with_middlewares(self, handler_func):\n p = handler_func\n for middleware in self._middlewares:\n p = partial(middleware, self, p)\n return p\n\n def call(self, func: Callable, *func_args: Any, **func_kwargs: Any) -> Any:\n \"\"\"\n Call a function with the given arguments and keyword arguments.\n Any dependencies will be resolved from the dependency provider.\n \"\"\"\n self.dependency_provider.update(ctx=as_type(self, TransactionContext))\n\n resolved_kwargs = self.dependency_provider.resolve_func_params(\n func, func_args, func_kwargs\n )\n self.resolved_kwargs.update(resolved_kwargs)\n p = partial(func, **resolved_kwargs)\n wrapped_handler = self._wrap_with_middlewares(p)\n result = wrapped_handler()\n return result\n\n def execute(self, task: Task) -> tuple[Any, ...]:\n results = self.emit(task)\n values = tuple(results.values())\n if len(values) == 0:\n raise ValueError(\"No handlers found for task\", task)\n return values\n\n def emit(self, message: str | Message, *args, **kwargs) -> dict[Callable, Any]:\n \"\"\"Emit a message by calling all handlers for that message\"\"\"\n alias = type(message) if isinstance(message, Message) else message\n\n if isinstance(message, Message):\n args = (message, *args)\n\n all_results = OrderedDict()\n for handler in self._handlers_iterator(alias):\n # FIXME: push and pop current action instead of setting it\n self.current_action = (message, handler)\n result = self.call(handler, *args, **kwargs)\n all_results[handler] = result\n return all_results\n\n def get_dependency(self, identifier: Any) -> Any:\n \"\"\"Get a dependency from the dependency provider\"\"\"\n return self.dependency_provider.get_dependency(identifier)\n\n def set_dependency(self, identifier: Any, dependency: Any) -> None:\n \"\"\"Set a dependency in the dependency provider\"\"\"\n self.dependency_provider.register_dependency(identifier, dependency)\n\n def __getitem__(self, item) -> Any:\n return self.get_dependency(item)"
},
{
"identifier": "as_type",
"path": "lato/dependency_provider.py",
"snippet": "def as_type(obj: Any, cls: type) -> TypedDependency:\n return TypedDependency(obj, cls)"
}
] | import uuid
import lagom.exceptions
from lagom import Container
from lato import Application, DependencyProvider, TransactionContext
from lato.dependency_provider import as_type | 3,635 |
class CorrelationId(uuid.UUID):
pass
class Name(str):
pass
class Session:
...
class Repository:
def __init__(self, session: Session):
self.session = session
class Engine:
def __init__(self, url):
self.url = url
def create_sesson(self):
return Session()
application_container = Container()
application_container[Name] = "Foo"
application_container[Engine] = Engine("sqlite:///:memory:")
class LagomDependencyProvider(DependencyProvider):
allow_names = False
def __init__(self, lagom_container):
self.container = lagom_container
def has_dependency(self, identifier: str | type) -> bool:
if type(identifier) is str:
return False
return identifier in self.container.defined_types
def register_dependency(self, identifier, dependency):
if type(identifier) is str:
raise ValueError(
f"Lagom container does not support string identifiers: {identifier}"
)
try:
self.container[identifier] = dependency
except lagom.exceptions.DuplicateDefinition:
pass
def get_dependency(self, identifier):
if type(identifier) is str:
raise ValueError(
f"Lagom container does not support string identifiers: {identifier}"
)
return self.container[identifier]
def copy(self, *args, **kwargs) -> DependencyProvider:
dp = LagomDependencyProvider(self.container.clone())
dp.update(*args, **kwargs)
return dp
dp1 = LagomDependencyProvider(application_container)
# make a copy
dp2 = dp1.copy()
# make sure that the original and the copy are the same
assert dp1[Name] == dp2[Name] == "Foo"
assert dp1[Engine] is dp2[Engine]
# create a copy with overriden value
dp3 = dp1.copy(name=as_type("Bar", Name)) # not yet implemented
# make sure that the original was not overriden
assert dp3[Name] == "Bar" and dp1[Name] == "Foo"
|
class CorrelationId(uuid.UUID):
pass
class Name(str):
pass
class Session:
...
class Repository:
def __init__(self, session: Session):
self.session = session
class Engine:
def __init__(self, url):
self.url = url
def create_sesson(self):
return Session()
application_container = Container()
application_container[Name] = "Foo"
application_container[Engine] = Engine("sqlite:///:memory:")
class LagomDependencyProvider(DependencyProvider):
allow_names = False
def __init__(self, lagom_container):
self.container = lagom_container
def has_dependency(self, identifier: str | type) -> bool:
if type(identifier) is str:
return False
return identifier in self.container.defined_types
def register_dependency(self, identifier, dependency):
if type(identifier) is str:
raise ValueError(
f"Lagom container does not support string identifiers: {identifier}"
)
try:
self.container[identifier] = dependency
except lagom.exceptions.DuplicateDefinition:
pass
def get_dependency(self, identifier):
if type(identifier) is str:
raise ValueError(
f"Lagom container does not support string identifiers: {identifier}"
)
return self.container[identifier]
def copy(self, *args, **kwargs) -> DependencyProvider:
dp = LagomDependencyProvider(self.container.clone())
dp.update(*args, **kwargs)
return dp
dp1 = LagomDependencyProvider(application_container)
# make a copy
dp2 = dp1.copy()
# make sure that the original and the copy are the same
assert dp1[Name] == dp2[Name] == "Foo"
assert dp1[Engine] is dp2[Engine]
# create a copy with overriden value
dp3 = dp1.copy(name=as_type("Bar", Name)) # not yet implemented
# make sure that the original was not overriden
assert dp3[Name] == "Bar" and dp1[Name] == "Foo"
| app = Application(dependency_provider=LagomDependencyProvider(application_container)) | 0 | 2023-10-21 11:33:05+00:00 | 8k |
NVIDIA/trt-llm-rag-windows | app.py | [
{
"identifier": "TrtLlmAPI",
"path": "trt_llama_api.py",
"snippet": "class TrtLlmAPI(CustomLLM):\n model_path: Optional[str] = Field(\n description=\"The path to the trt engine.\"\n )\n temperature: float = Field(description=\"The temperature to use for sampling.\")\n max_new_tokens: int = Field(description=\"The maximum number of tokens to generate.\")\n context_window: int = Field(\n description=\"The maximum number of context tokens for the model.\"\n )\n messages_to_prompt: Callable = Field(\n description=\"The function to convert messages to a prompt.\", exclude=True\n )\n completion_to_prompt: Callable = Field(\n description=\"The function to convert a completion to a prompt.\", exclude=True\n )\n generate_kwargs: Dict[str, Any] = Field(\n default_factory=dict, description=\"Kwargs used for generation.\"\n )\n model_kwargs: Dict[str, Any] = Field(\n default_factory=dict, description=\"Kwargs used for model initialization.\"\n )\n verbose: bool = Field(description=\"Whether to print verbose output.\")\n\n _model: Any = PrivateAttr()\n _model_config: Any = PrivateAttr()\n _tokenizer: Any = PrivateAttr()\n _max_new_tokens = PrivateAttr()\n _sampling_config = PrivateAttr()\n _verbose = PrivateAttr()\n\n def __init__(\n self,\n model_path: Optional[str] = None,\n engine_name: Optional[str] = None,\n tokenizer_dir: Optional[str] = None,\n temperature: float = 0.1,\n max_new_tokens: int = DEFAULT_NUM_OUTPUTS,\n context_window: int = DEFAULT_CONTEXT_WINDOW,\n messages_to_prompt: Optional[Callable] = None,\n completion_to_prompt: Optional[Callable] = None,\n callback_manager: Optional[CallbackManager] = None,\n generate_kwargs: Optional[Dict[str, Any]] = None,\n model_kwargs: Optional[Dict[str, Any]] = None,\n verbose: bool = False\n ) -> None:\n\n model_kwargs = model_kwargs or {}\n model_kwargs.update({\"n_ctx\": context_window, \"verbose\": verbose})\n self._max_new_tokens = max_new_tokens\n self._verbose = verbose\n # check if model is cached\n if model_path is not None:\n if not os.path.exists(model_path):\n raise ValueError(\n \"Provided model path does not exist. \"\n \"Please check the path or provide a model_url to download.\"\n )\n else:\n engine_dir = model_path\n engine_dir_path = Path(engine_dir)\n config_path = engine_dir_path / 'config.json'\n\n # config function\n with open(config_path, 'r') as f:\n config = json.load(f)\n use_gpt_attention_plugin = config['plugin_config']['gpt_attention_plugin']\n remove_input_padding = config['plugin_config']['remove_input_padding']\n tp_size = config['builder_config']['tensor_parallel']\n pp_size = config['builder_config']['pipeline_parallel']\n world_size = tp_size * pp_size\n assert world_size == tensorrt_llm.mpi_world_size(), \\\n f'Engine world size ({world_size}) != Runtime world size ({tensorrt_llm.mpi_world_size()})'\n num_heads = config['builder_config']['num_heads'] // tp_size\n hidden_size = config['builder_config']['hidden_size'] // tp_size\n vocab_size = config['builder_config']['vocab_size']\n num_layers = config['builder_config']['num_layers']\n num_kv_heads = config['builder_config'].get('num_kv_heads', num_heads)\n paged_kv_cache = config['plugin_config']['paged_kv_cache']\n if config['builder_config'].get('multi_query_mode', False):\n tensorrt_llm.logger.warning(\n \"`multi_query_mode` config is deprecated. Please rebuild the engine.\"\n )\n num_kv_heads = 1\n num_kv_heads = (num_kv_heads + tp_size - 1) // tp_size\n\n self._model_config = ModelConfig(num_heads=num_heads,\n num_kv_heads=num_kv_heads,\n hidden_size=hidden_size,\n vocab_size=vocab_size,\n num_layers=num_layers,\n gpt_attention_plugin=use_gpt_attention_plugin,\n paged_kv_cache=paged_kv_cache,\n remove_input_padding=remove_input_padding)\n\n assert pp_size == 1, 'Python runtime does not support pipeline parallelism'\n world_size = tp_size * pp_size\n\n runtime_rank = tensorrt_llm.mpi_rank()\n runtime_mapping = tensorrt_llm.Mapping(world_size,\n runtime_rank,\n tp_size=tp_size,\n pp_size=pp_size)\n torch.cuda.set_device(runtime_rank % runtime_mapping.gpus_per_node)\n self._tokenizer = LlamaTokenizer.from_pretrained(tokenizer_dir, legacy=False)\n self._sampling_config = SamplingConfig(end_id=EOS_TOKEN,\n pad_id=PAD_TOKEN,\n num_beams=1,\n temperature=temperature)\n\n serialize_path = engine_dir_path / engine_name\n with open(serialize_path, 'rb') as f:\n engine_buffer = f.read()\n decoder = tensorrt_llm.runtime.GenerationSession(self._model_config,\n engine_buffer,\n runtime_mapping,\n debug_mode=False)\n self._model = decoder\n messages_to_prompt = messages_to_prompt or generic_messages_to_prompt\n completion_to_prompt = completion_to_prompt or (lambda x: x)\n\n generate_kwargs = generate_kwargs or {}\n generate_kwargs.update(\n {\"temperature\": temperature, \"max_tokens\": max_new_tokens}\n )\n\n super().__init__(\n model_path=model_path,\n temperature=temperature,\n context_window=context_window,\n max_new_tokens=max_new_tokens,\n messages_to_prompt=messages_to_prompt,\n completion_to_prompt=completion_to_prompt,\n callback_manager=callback_manager,\n generate_kwargs=generate_kwargs,\n model_kwargs=model_kwargs,\n verbose=verbose,\n )\n\n @classmethod\n def class_name(cls) -> str:\n \"\"\"Get class name.\"\"\"\n return \"TrtLlmAPI\"\n\n @property\n def metadata(self) -> LLMMetadata:\n \"\"\"LLM metadata.\"\"\"\n return LLMMetadata(\n context_window=self.context_window,\n num_output=self.max_new_tokens,\n model_name=self.model_path,\n )\n\n @llm_chat_callback()\n def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:\n prompt = self.messages_to_prompt(messages)\n completion_response = self.complete(prompt, formatted=True, **kwargs)\n return completion_response_to_chat_response(completion_response)\n\n @llm_completion_callback()\n def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:\n self.generate_kwargs.update({\"stream\": False})\n\n is_formatted = kwargs.pop(\"formatted\", False)\n if not is_formatted:\n prompt = self.completion_to_prompt(prompt)\n\n input_text = prompt\n input_ids, input_lengths = self.parse_input(input_text, self._tokenizer,\n EOS_TOKEN,\n self._model_config)\n\n max_input_length = torch.max(input_lengths).item()\n self._model.setup(input_lengths.size(0), max_input_length, self._max_new_tokens, 1) # beam size is set to 1\n if self._verbose:\n start_time = time.time()\n\n output_ids = self._model.decode(input_ids, input_lengths, self._sampling_config)\n torch.cuda.synchronize()\n\n elapsed_time = None\n if self._verbose:\n end_time = time.time()\n elapsed_time = end_time - start_time\n\n\n output_txt, output_token_ids = self.get_output(output_ids,\n input_lengths,\n self._max_new_tokens,\n self._tokenizer)\n\n if self._verbose:\n print(f\"Input context length : {input_ids.shape[1]}\")\n print(f\"Inference time : {elapsed_time:.2f} seconds\")\n print(f\"Output context length : {len(output_token_ids)} \")\n print(f\"Inference token/sec : {(len(output_token_ids) / elapsed_time):2f}\")\n\n # call garbage collected after inference\n torch.cuda.empty_cache()\n gc.collect()\n\n return CompletionResponse(text=output_txt, raw=self.generate_completion_dict(output_txt))\n\n def parse_input(self, input_text: str, tokenizer, end_id: int,\n remove_input_padding: bool):\n input_tokens = []\n\n input_tokens.append(\n tokenizer.encode(input_text, add_special_tokens=False))\n\n input_lengths = torch.tensor([len(x) for x in input_tokens],\n dtype=torch.int32,\n device='cuda')\n if remove_input_padding:\n input_ids = np.concatenate(input_tokens)\n input_ids = torch.tensor(input_ids, dtype=torch.int32,\n device='cuda').unsqueeze(0)\n else:\n input_ids = torch.nested.to_padded_tensor(\n torch.nested.nested_tensor(input_tokens, dtype=torch.int32),\n end_id).cuda()\n\n return input_ids, input_lengths\n\n def remove_extra_eos_ids(self, outputs):\n outputs.reverse()\n while outputs and outputs[0] == 2:\n outputs.pop(0)\n outputs.reverse()\n outputs.append(2)\n return outputs\n\n def get_output(self, output_ids, input_lengths, max_output_len, tokenizer):\n num_beams = output_ids.size(1)\n output_text = \"\"\n outputs = None\n for b in range(input_lengths.size(0)):\n for beam in range(num_beams):\n output_begin = input_lengths[b]\n output_end = input_lengths[b] + max_output_len\n outputs = output_ids[b][beam][output_begin:output_end].tolist()\n outputs = self.remove_extra_eos_ids(outputs)\n output_text = tokenizer.decode(outputs)\n\n return output_text, outputs\n\n def generate_completion_dict(self, text_str):\n \"\"\"\n Generate a dictionary for text completion details.\n Returns:\n dict: A dictionary containing completion details.\n \"\"\"\n completion_id: str = f\"cmpl-{str(uuid.uuid4())}\"\n created: int = int(time.time())\n model_name: str = self._model if self._model is not None else self.model_path\n return {\n \"id\": completion_id,\n \"object\": \"text_completion\",\n \"created\": created,\n \"model\": model_name,\n \"choices\": [\n {\n \"text\": text_str,\n \"index\": 0,\n \"logprobs\": None,\n \"finish_reason\": 'stop'\n }\n ],\n \"usage\": {\n \"prompt_tokens\": None,\n \"completion_tokens\": None,\n \"total_tokens\": None\n }\n }\n\n @llm_completion_callback()\n def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:\n pass"
},
{
"identifier": "FaissEmbeddingStorage",
"path": "faiss_vector_storage.py",
"snippet": "class FaissEmbeddingStorage:\n\n def __init__(self, data_dir, dimension=384):\n self.d = dimension\n self.data_dir = data_dir\n self.index = self.initialize_index()\n\n def initialize_index(self):\n if os.path.exists(\"storage-default\") and os.listdir(\"storage-default\"):\n print(\"Using the presisted value\")\n vector_store = FaissVectorStore.from_persist_dir(\"storage-default\")\n storage_context = StorageContext.from_defaults(\n vector_store=vector_store, persist_dir=\"storage-default\"\n )\n index = load_index_from_storage(storage_context=storage_context)\n return index\n else:\n print(\"generating new values\")\n documents = SimpleDirectoryReader(self.data_dir).load_data()\n faiss_index = faiss.IndexFlatL2(self.d)\n vector_store = FaissVectorStore(faiss_index=faiss_index)\n storage_context = StorageContext.from_defaults(vector_store=vector_store)\n index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)\n index.storage_context.persist(persist_dir = \"storage-default\")\n return index\n\n def get_query_engine(self):\n return self.index.as_query_engine()"
}
] | import time
import gradio as gr
import argparse
from trt_llama_api import TrtLlmAPI #llama_index does not currently support TRT-LLM. The trt_llama_api.py file defines a llama_index compatible interface for TRT-LLM.
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index import LangchainEmbedding, ServiceContext
from llama_index.llms.llama_utils import messages_to_prompt, completion_to_prompt
from llama_index import set_global_service_context
from faiss_vector_storage import FaissEmbeddingStorage | 3,669 | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# Create an argument parser
parser = argparse.ArgumentParser(description='NVIDIA Chatbot Parameters')
# Add arguments
parser.add_argument('--trt_engine_path', type=str, required=True,
help="Path to the TensorRT engine.", default="")
parser.add_argument('--trt_engine_name', type=str, required=True,
help="Name of the TensorRT engine.", default="")
parser.add_argument('--tokenizer_dir_path', type=str, required=True,
help="Directory path for the tokenizer.", default="")
parser.add_argument('--embedded_model', type=str,
help="Name or path of the embedded model. Defaults to 'sentence-transformers/all-MiniLM-L6-v2' if "
"not provided.",
default='sentence-transformers/all-MiniLM-L6-v2')
parser.add_argument('--data_dir', type=str, required=False,
help="Directory path for data.", default="./dataset")
parser.add_argument('--verbose', type=bool, required=False,
help="Enable verbose logging.", default=False)
# Parse the arguments
args = parser.parse_args()
# Use the provided arguments
trt_engine_path = args.trt_engine_path
trt_engine_name = args.trt_engine_name
tokenizer_dir_path = args.tokenizer_dir_path
embedded_model = args.embedded_model
data_dir = args.data_dir
verbose = args.verbose
# create trt_llm engine object
llm = TrtLlmAPI(
model_path=trt_engine_path,
engine_name=trt_engine_name,
tokenizer_dir=tokenizer_dir_path,
temperature=0.1,
max_new_tokens=1024,
context_window=3900,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
verbose=False
)
# create embeddings model object
embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name=embedded_model))
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
set_global_service_context(service_context)
# load the vectorstore index
| # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# Create an argument parser
parser = argparse.ArgumentParser(description='NVIDIA Chatbot Parameters')
# Add arguments
parser.add_argument('--trt_engine_path', type=str, required=True,
help="Path to the TensorRT engine.", default="")
parser.add_argument('--trt_engine_name', type=str, required=True,
help="Name of the TensorRT engine.", default="")
parser.add_argument('--tokenizer_dir_path', type=str, required=True,
help="Directory path for the tokenizer.", default="")
parser.add_argument('--embedded_model', type=str,
help="Name or path of the embedded model. Defaults to 'sentence-transformers/all-MiniLM-L6-v2' if "
"not provided.",
default='sentence-transformers/all-MiniLM-L6-v2')
parser.add_argument('--data_dir', type=str, required=False,
help="Directory path for data.", default="./dataset")
parser.add_argument('--verbose', type=bool, required=False,
help="Enable verbose logging.", default=False)
# Parse the arguments
args = parser.parse_args()
# Use the provided arguments
trt_engine_path = args.trt_engine_path
trt_engine_name = args.trt_engine_name
tokenizer_dir_path = args.tokenizer_dir_path
embedded_model = args.embedded_model
data_dir = args.data_dir
verbose = args.verbose
# create trt_llm engine object
llm = TrtLlmAPI(
model_path=trt_engine_path,
engine_name=trt_engine_name,
tokenizer_dir=tokenizer_dir_path,
temperature=0.1,
max_new_tokens=1024,
context_window=3900,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
verbose=False
)
# create embeddings model object
embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name=embedded_model))
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
set_global_service_context(service_context)
# load the vectorstore index | faiss_storage = FaissEmbeddingStorage(data_dir=data_dir) | 1 | 2023-10-18 12:57:53+00:00 | 8k |
instadeepai/flashbax | flashbax/buffers/prioritised_trajectory_buffer_test.py | [
{
"identifier": "prioritised_trajectory_buffer",
"path": "flashbax/buffers/prioritised_trajectory_buffer.py",
"snippet": "SET_BATCH_FN = {\n \"tpu\": sum_tree.set_batch_bincount,\n \"gpu\": sum_tree.set_batch_bincount,\n \"cpu\": sum_tree.set_batch_scan,\n}\nclass PrioritisedTrajectoryBufferState(TrajectoryBufferState, Generic[Experience]):\nclass PrioritisedTrajectoryBufferSample(TrajectoryBufferSample, Generic[Experience]):\nclass PrioritisedTrajectoryBuffer(\n TrajectoryBuffer[Experience, BufferState, BufferSample]\n):\ndef get_sum_tree_capacity(\n max_length_time_axis: int, period: int, add_batch_size: int\n) -> int:\ndef prioritised_init(\n experience: Experience,\n add_batch_size: int,\n max_length_time_axis: int,\n period: int,\n) -> PrioritisedTrajectoryBufferState[Experience]:\ndef calculate_item_indices_and_priorities(\n state: PrioritisedTrajectoryBufferState,\n sample_sequence_length: int,\n period: int,\n add_sequence_length: int,\n add_batch_size: int,\n max_length_time_axis: int,\n) -> Tuple[Array, Array]:\ndef _get_unnormalised_prob(\n add_batch_size: int,\n max_num_items: int,\n priority_mask: Array,\n state: PrioritisedTrajectoryBufferState,\n) -> Array:\ndef _get_priority_indices(\n add_batch_size: int,\n max_length_time_axis: int,\n max_num_items: int,\n period: int,\n starting_priority_item_index: Array,\n) -> Array:\ndef _get_ending_data_idx(\n add_sequence_length: int,\n max_length_time_axis: int,\n sample_sequence_length: int,\n state: PrioritisedTrajectoryBufferState,\n) -> Array:\ndef _get_starting_priority_item_idx(\n max_length_time_axis: int,\n period: int,\n previous_valid_data_index: Array,\n state: PrioritisedTrajectoryBufferState,\n) -> Array:\ndef get_prev_valid_data_idx(\n max_length_time_axis: int,\n max_subsequence_data_index: int,\n sample_sequence_length: int,\n state: PrioritisedTrajectoryBufferState,\n) -> Array:\ndef prioritised_add(\n state: PrioritisedTrajectoryBufferState[Experience],\n batch: Experience,\n sample_sequence_length: int,\n period: int,\n device: str,\n) -> PrioritisedTrajectoryBufferState[Experience]:\ndef prioritised_sample(\n state: PrioritisedTrajectoryBufferState[Experience],\n rng_key: chex.PRNGKey,\n batch_size: int,\n sequence_length: int,\n period: int,\n) -> PrioritisedTrajectoryBufferSample[Experience]:\ndef _get_sample_trajectories(\n item_indices: Array,\n max_length_time_axis: int,\n period: int,\n sequence_length: int,\n state: PrioritisedTrajectoryBufferState,\n):\ndef set_priorities(\n state: PrioritisedTrajectoryBufferState[Experience],\n indices: Indices,\n priorities: Priorities,\n priority_exponent: float,\n device: str,\n) -> PrioritisedTrajectoryBufferState[Experience]:\ndef validate_priority_exponent(priority_exponent: float):\ndef validate_device(device: str):\ndef make_prioritised_trajectory_buffer(\n add_batch_size: int,\n sample_batch_size: int,\n sample_sequence_length: int,\n period: int,\n min_length_time_axis: int,\n max_size: Optional[int] = None,\n max_length_time_axis: Optional[int] = None,\n priority_exponent: float = 0.6,\n device: str = \"cpu\",\n) -> PrioritisedTrajectoryBuffer:"
},
{
"identifier": "sum_tree",
"path": "flashbax/buffers/sum_tree.py",
"snippet": "class SumTreeState:\ndef get_tree_depth(capacity: int) -> int:\ndef init(capacity: int) -> SumTreeState:\ndef _total_priority(state: SumTreeState) -> Array:\ndef get_tree_index(\n depth_level: Union[Array, int], node_index: Union[Array, int]\n) -> Array:\ndef sample(\n state: SumTreeState,\n rng_key: Optional[chex.PRNGKey] = None,\n query_value: Optional[Array] = None,\n) -> Array:\n def get_node_index(\n depth_level: Array, carry: Tuple[Array, Array, Array]\n ) -> Tuple[Array, Array, Array]:\ndef stratified_sample(\n state: SumTreeState,\n batch_size: int,\n rng_key: chex.PRNGKey,\n) -> Array:\ndef get(state: SumTreeState, node_index: Array) -> Array:\ndef get_batch(state: SumTreeState, node_indices: Array) -> Array:\ndef set_non_batched(\n state: SumTreeState,\n node_index: Array,\n value: Array,\n) -> SumTreeState:\n def update_nodes(\n idx: Array, carry: Tuple[Array, Array, Array, Array]\n ) -> Tuple[Array, Array, Array, Array]:\ndef set_batch_bincount(\n state: SumTreeState, node_indices: Array, values: Array\n) -> SumTreeState:\n def update_nodes(i: Array, carry: Tuple[Array, Array, Array, Array]):\ndef set_batch_scan(\n state: SumTreeState, node_indices: Array, values: Array\n) -> SumTreeState:\n def update_node_priority(state: SumTreeState, node_data: Tuple[Array, Array]):"
},
{
"identifier": "trajectory_buffer",
"path": "flashbax/buffers/trajectory_buffer.py",
"snippet": "class TrajectoryBufferState(Generic[Experience]):\nclass TrajectoryBufferSample(Generic[Experience]):\ndef init(\n experience: Experience,\n add_batch_size: int,\n max_length_time_axis: int,\n) -> TrajectoryBufferState[Experience]:\ndef add(\n state: TrajectoryBufferState[Experience],\n batch: Experience,\n) -> TrajectoryBufferState[Experience]:\ndef get_invalid_indices(\n state: TrajectoryBufferState[Experience],\n sample_sequence_length: int,\n period: int,\n add_batch_size: int,\n max_length_time_axis: int,\n) -> Array:\ndef calculate_uniform_item_indices(\n state: TrajectoryBufferState[Experience],\n rng_key: chex.PRNGKey,\n batch_size: int,\n sample_sequence_length: int,\n period: int,\n add_batch_size: int,\n max_length_time_axis: int,\n) -> Array:\ndef sample(\n state: TrajectoryBufferState[Experience],\n rng_key: chex.PRNGKey,\n batch_size: int,\n sequence_length: int,\n period: int,\n) -> TrajectoryBufferSample[Experience]:\ndef can_sample(\n state: TrajectoryBufferState[Experience], min_length_time_axis: int\n) -> Array:"
},
{
"identifier": "get_fake_batch",
"path": "flashbax/buffers/conftest.py",
"snippet": "def get_fake_batch(fake_transition: chex.ArrayTree, batch_size) -> chex.ArrayTree:\n \"\"\"Create a fake batch with differing values for each transition.\"\"\"\n return jax.tree_map(\n lambda x: jnp.stack([x + i for i in range(batch_size)]), fake_transition\n )"
},
{
"identifier": "_DEVICE_COUNT_MOCK",
"path": "flashbax/conftest.py",
"snippet": "_DEVICE_COUNT_MOCK = 2"
}
] | from copy import deepcopy
from typing import List
from flashbax.buffers import prioritised_trajectory_buffer, sum_tree, trajectory_buffer
from flashbax.buffers.conftest import get_fake_batch
from flashbax.conftest import _DEVICE_COUNT_MOCK
import chex
import jax
import jax.numpy as jnp
import numpy as np
import pytest | 3,879 |
# Check correct the shape prefix is correct.
chex.assert_trees_all_equal_dtypes(
fake_transition, batch1.experience, batch2.experience
)
@pytest.mark.parametrize("sample_period", [1, 2, 3, 4, 5])
def test_prioritised_sample_with_period(
fake_transition: chex.ArrayTree,
min_length: int,
max_length: int,
add_batch_size: int,
sample_sequence_length: int,
rng_key: chex.PRNGKey,
sample_batch_size: int,
sample_period: int,
device: str,
) -> None:
"""Test the random sampling with different periods."""
# Choose period based on the degree of overlap tested
assert sample_sequence_length >= sample_period
rng_key1, rng_key2 = jax.random.split(rng_key)
# Initialise the buffer
state = prioritised_trajectory_buffer.prioritised_init(
fake_transition, add_batch_size, max_length, sample_period
)
# Create a batch but specifically ensure that sequences in different add_batch rows
# are distinct - this is simply for testing purposes in order to verify periodicity
fake_batch_sequence = jax.tree_map(
lambda x: jnp.stack([x + i * (max_length - 1) for i in range(add_batch_size)]),
get_fake_batch(fake_transition, max_length - 1),
)
assert np.prod(fake_batch_sequence["reward"].shape) == np.prod(
jnp.unique(fake_batch_sequence["reward"]).shape
)
# Add the fake sequence to the buffer
state = prioritised_trajectory_buffer.prioritised_add(
state, fake_batch_sequence, sample_sequence_length, sample_period, device
)
assert trajectory_buffer.can_sample(state, min_length)
# Sample from the buffer
batch1 = prioritised_trajectory_buffer.prioritised_sample(
state, rng_key1, sample_batch_size, sample_sequence_length, sample_period
)
# Check correct the shape prefix is correct.
chex.assert_tree_shape_prefix(
batch1.experience, (sample_batch_size, sample_sequence_length)
)
# Check that the initial value in each sequence is always in a position that is a
# multiple of the sample period or zero.
# We check each sequence compared to every other sequence.
for i in range(sample_batch_size):
equal = batch1.experience["reward"][i][0] == batch1.experience["reward"] # type: ignore
pos = jnp.argmax(equal, axis=1)
test = (pos % sample_period == 0).astype(jnp.int32) + (pos == 0).astype(
jnp.int32
)
assert jnp.all(test.astype(jnp.bool_))
def test_adjust_priorities(
fake_transition: chex.ArrayTree,
min_length: int,
max_length: int,
rng_key: chex.PRNGKey,
add_batch_size: int,
sample_sequence_length: int,
sample_batch_size: int,
sample_period: int,
priority_exponent: float,
device: str,
) -> None:
"""Test the adjustment of priorities in the buffer."""
rng_key1, rng_key2 = jax.random.split(rng_key)
state = prioritised_trajectory_buffer.prioritised_init(
fake_transition,
add_batch_size,
max_length,
sample_period,
)
# Fill buffer to the point that we can sample.
fake_batch_sequence = get_fake_batch_sequence(
fake_transition, add_batch_size, min_length + 10
)
state = prioritised_trajectory_buffer.prioritised_add(
state, fake_batch_sequence, sample_sequence_length, sample_period, device
)
# Sample from the buffer.
batch = prioritised_trajectory_buffer.prioritised_sample(
state, rng_key1, sample_batch_size, sample_sequence_length, sample_period
)
# Create fake new priorities, and apply the adjustment.
new_priorities = jnp.ones_like(batch.priorities) + 10007
state = prioritised_trajectory_buffer.set_priorities(
state, batch.indices, new_priorities, priority_exponent, device
)
# Check that this results in the correct changes to the state.
assert (
state.priority_state.max_recorded_priority
== jnp.max(new_priorities) ** priority_exponent
)
assert (
| # Copyright 2023 InstaDeep Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@pytest.fixture()
def sample_sequence_length() -> int:
return 5
@pytest.fixture()
def add_sequence_length() -> int:
return 7
@pytest.fixture()
def sample_period() -> int:
return 3
@pytest.fixture()
def priority_exponent() -> float:
return 0.6
@pytest.fixture()
def device() -> str:
return "tpu"
@pytest.fixture()
def prioritised_state(
fake_transition: chex.ArrayTree,
max_length: int,
add_batch_size: int,
sample_period: int,
) -> prioritised_trajectory_buffer.PrioritisedTrajectoryBufferState:
"""Initialise the trajectory buffer state."""
return prioritised_trajectory_buffer.prioritised_init(
fake_transition,
add_batch_size,
max_length,
sample_period,
)
def get_fake_batch_sequence(
fake_transition: chex.ArrayTree, batch_size: int, sequence_length: int
) -> chex.ArrayTree:
return get_fake_batch(get_fake_batch(fake_transition, sequence_length), batch_size)
def test_add_and_can_sample_prioritised(
prioritised_state: prioritised_trajectory_buffer.PrioritisedTrajectoryBufferState,
fake_transition: chex.ArrayTree,
min_length: int,
max_length: int,
add_batch_size: int,
add_sequence_length: int,
sample_sequence_length: int,
sample_period: int,
device: str,
) -> None:
"""Check the `add` function by filling the buffer all
the way to the max_length and checking that it produces the expected behaviour .
"""
fake_batch_sequence = get_fake_batch_sequence(
fake_transition, add_batch_size, add_sequence_length
)
init_state = deepcopy(prioritised_state) # Save for later checks.
n_batches_to_fill = int(np.ceil(max_length / add_sequence_length))
n_batches_to_sample = int(np.ceil(min_length / add_sequence_length))
for i in range(n_batches_to_fill):
assert not prioritised_state.is_full
prioritised_state = prioritised_trajectory_buffer.prioritised_add(
prioritised_state,
fake_batch_sequence,
sample_sequence_length,
sample_period,
device,
)
num_added_timesteps = (i + 1) * add_sequence_length
assert prioritised_state.current_index == (num_added_timesteps % max_length)
# Check that the `can_sample` function behavior is correct.
is_ready_to_sample = trajectory_buffer.can_sample(prioritised_state, min_length)
if i < (n_batches_to_sample - 1):
assert not is_ready_to_sample
else:
assert is_ready_to_sample
assert prioritised_state.is_full
# Check that the trajectorys have been updated.
with pytest.raises(AssertionError):
chex.assert_trees_all_close(prioritised_state.experience, init_state.experience)
def test_prioritised_sample(
prioritised_state: prioritised_trajectory_buffer.PrioritisedTrajectoryBufferState,
fake_transition: chex.ArrayTree,
min_length: int,
add_batch_size: int,
sample_sequence_length: int,
rng_key: chex.PRNGKey,
sample_batch_size: int,
sample_period: int,
device: str,
) -> None:
"""Test the random sampling from the buffer."""
rng_key1, rng_key2 = jax.random.split(rng_key)
# Fill buffer to the point that we can sample
fake_batch_sequence = get_fake_batch_sequence(
fake_transition, add_batch_size, min_length + 10
)
prioritised_state = prioritised_trajectory_buffer.prioritised_add(
prioritised_state,
fake_batch_sequence,
sample_sequence_length,
sample_period,
device,
)
assert trajectory_buffer.can_sample(prioritised_state, min_length)
# Sample from the buffer with different keys and check it gives us different batches.
batch1 = prioritised_trajectory_buffer.prioritised_sample(
prioritised_state,
rng_key1,
sample_batch_size,
sample_sequence_length,
sample_period,
)
batch2 = prioritised_trajectory_buffer.prioritised_sample(
prioritised_state,
rng_key2,
sample_batch_size,
sample_sequence_length,
sample_period,
)
# Check that the trajectorys have been updated.
with pytest.raises(AssertionError):
chex.assert_trees_all_close(batch1, batch2)
assert (batch1.priorities > 0).all()
assert (batch2.priorities > 0).all()
# Check correct the shape prefix is correct.
chex.assert_trees_all_equal_dtypes(
fake_transition, batch1.experience, batch2.experience
)
@pytest.mark.parametrize("sample_period", [1, 2, 3, 4, 5])
def test_prioritised_sample_with_period(
fake_transition: chex.ArrayTree,
min_length: int,
max_length: int,
add_batch_size: int,
sample_sequence_length: int,
rng_key: chex.PRNGKey,
sample_batch_size: int,
sample_period: int,
device: str,
) -> None:
"""Test the random sampling with different periods."""
# Choose period based on the degree of overlap tested
assert sample_sequence_length >= sample_period
rng_key1, rng_key2 = jax.random.split(rng_key)
# Initialise the buffer
state = prioritised_trajectory_buffer.prioritised_init(
fake_transition, add_batch_size, max_length, sample_period
)
# Create a batch but specifically ensure that sequences in different add_batch rows
# are distinct - this is simply for testing purposes in order to verify periodicity
fake_batch_sequence = jax.tree_map(
lambda x: jnp.stack([x + i * (max_length - 1) for i in range(add_batch_size)]),
get_fake_batch(fake_transition, max_length - 1),
)
assert np.prod(fake_batch_sequence["reward"].shape) == np.prod(
jnp.unique(fake_batch_sequence["reward"]).shape
)
# Add the fake sequence to the buffer
state = prioritised_trajectory_buffer.prioritised_add(
state, fake_batch_sequence, sample_sequence_length, sample_period, device
)
assert trajectory_buffer.can_sample(state, min_length)
# Sample from the buffer
batch1 = prioritised_trajectory_buffer.prioritised_sample(
state, rng_key1, sample_batch_size, sample_sequence_length, sample_period
)
# Check correct the shape prefix is correct.
chex.assert_tree_shape_prefix(
batch1.experience, (sample_batch_size, sample_sequence_length)
)
# Check that the initial value in each sequence is always in a position that is a
# multiple of the sample period or zero.
# We check each sequence compared to every other sequence.
for i in range(sample_batch_size):
equal = batch1.experience["reward"][i][0] == batch1.experience["reward"] # type: ignore
pos = jnp.argmax(equal, axis=1)
test = (pos % sample_period == 0).astype(jnp.int32) + (pos == 0).astype(
jnp.int32
)
assert jnp.all(test.astype(jnp.bool_))
def test_adjust_priorities(
fake_transition: chex.ArrayTree,
min_length: int,
max_length: int,
rng_key: chex.PRNGKey,
add_batch_size: int,
sample_sequence_length: int,
sample_batch_size: int,
sample_period: int,
priority_exponent: float,
device: str,
) -> None:
"""Test the adjustment of priorities in the buffer."""
rng_key1, rng_key2 = jax.random.split(rng_key)
state = prioritised_trajectory_buffer.prioritised_init(
fake_transition,
add_batch_size,
max_length,
sample_period,
)
# Fill buffer to the point that we can sample.
fake_batch_sequence = get_fake_batch_sequence(
fake_transition, add_batch_size, min_length + 10
)
state = prioritised_trajectory_buffer.prioritised_add(
state, fake_batch_sequence, sample_sequence_length, sample_period, device
)
# Sample from the buffer.
batch = prioritised_trajectory_buffer.prioritised_sample(
state, rng_key1, sample_batch_size, sample_sequence_length, sample_period
)
# Create fake new priorities, and apply the adjustment.
new_priorities = jnp.ones_like(batch.priorities) + 10007
state = prioritised_trajectory_buffer.set_priorities(
state, batch.indices, new_priorities, priority_exponent, device
)
# Check that this results in the correct changes to the state.
assert (
state.priority_state.max_recorded_priority
== jnp.max(new_priorities) ** priority_exponent
)
assert ( | sum_tree.get(state.priority_state, batch.indices) | 1 | 2023-10-17 10:57:14+00:00 | 8k |
TheDuckAI/DuckTrack | ducktrack/app.py | [
{
"identifier": "close_obs",
"path": "ducktrack/obs_client.py",
"snippet": "def close_obs(obs_process: subprocess.Popen):\n if obs_process:\n obs_process.terminate()\n try:\n obs_process.wait(timeout=5)\n except subprocess.TimeoutExpired:\n obs_process.kill()"
},
{
"identifier": "is_obs_running",
"path": "ducktrack/obs_client.py",
"snippet": "def is_obs_running() -> bool:\n try:\n for process in psutil.process_iter(attrs=[\"pid\", \"name\"]):\n if \"obs\" in process.info[\"name\"].lower():\n return True\n return False\n except:\n raise Exception(\"Could not check if OBS is running already. Please check manually.\")"
},
{
"identifier": "open_obs",
"path": "ducktrack/obs_client.py",
"snippet": "def open_obs() -> subprocess.Popen:\n try:\n obs_path = find_obs()\n if system() == \"Windows\":\n # you have to change the working directory first for OBS to find the correct locale on windows\n os.chdir(os.path.dirname(obs_path))\n obs_path = os.path.basename(obs_path)\n return subprocess.Popen([obs_path, \"--startreplaybuffer\", \"--minimize-to-tray\"])\n except:\n raise Exception(\"Failed to find OBS, please open OBS manually.\")"
},
{
"identifier": "Player",
"path": "ducktrack/playback.py",
"snippet": "class Player:\n \"\"\"\n Plays back recordings.\n \"\"\"\n \n def __init__(self):\n self.stop_playback = False\n self.listener = KeyCombinationListener()\n \n def stop_comb_pressed():\n self.stop_playback = True\n return False\n \n self.listener.add_comb((\"shift\", \"esc\"), stop_comb_pressed)\n self.listener.start()\n \n def play(self, recording_path: str):\n with open(os.path.join(recording_path, \"events.jsonl\"), \"r\") as f:\n events = [json.loads(line) for line in f.readlines()]\n \n with open(os.path.join(recording_path, \"metadata.json\"), \"r\") as f:\n metadata = json.load(f)\n \n self.playback(events, metadata)\n \n def playback(self, events: list[dict], metadata: dict):\n if metadata[\"system\"] == \"Windows\":\n fix_windows_dpi_scaling()\n \n mouse_controller = MouseController()\n keyboard_controller = KeyboardController()\n\n if not events:\n self.listener.stop()\n return\n\n presses_to_skip = 0\n releases_to_skip = 0\n \n in_click_sequence = False\n \n for i, event in enumerate(events):\n start_time = time.perf_counter()\n \n if self.stop_playback:\n return\n \n def do_mouse_press(button):\n for j, second_event in enumerate(events[i+1:]):\n # make sure the time between mouse clicks is less than 500ms\n if second_event[\"time_stamp\"] - event[\"time_stamp\"] > 0.5:\n break\n \n if \"x\" in second_event and \"y\" in second_event:\n # if the mouse moves out of the click radius/rectangle, it is not a click sequence\n if math.sqrt((second_event[\"y\"] - event[\"y\"]) ** 2 +\n (second_event[\"x\"] - event[\"x\"]) ** 2) > 4:\n break\n \n if second_event[\"action\"] == \"click\" and second_event[\"pressed\"]:\n for k, third_event in enumerate(events[i+j+2:]):\n if third_event[\"time_stamp\"] - second_event[\"time_stamp\"] > 0.5:\n break\n \n if \"x\" in third_event and \"y\" in third_event:\n if math.sqrt((third_event[\"y\"] - event[\"y\"]) ** 2 +\n (third_event[\"x\"] - event[\"x\"]) ** 2) > 5:\n break\n \n if third_event[\"action\"] == \"click\" and third_event[\"pressed\"]:\n mouse_controller.click(button, 3) \n return 2, 2\n\n mouse_controller.click(button, 2)\n return 1, 1\n \n mouse_controller.press(button)\n return 0, 0\n\n if event[\"action\"] == \"move\":\n mouse_controller.position = (event[\"x\"], event[\"y\"])\n\n elif event[\"action\"] == \"click\":\n button = name_to_button(event[\"button\"])\n \n if event[\"pressed\"]:\n if presses_to_skip == 0:\n presses, releases = do_mouse_press(button) \n presses_to_skip += presses\n releases_to_skip += releases\n \n if presses > 0:\n in_click_sequence = True\n else:\n presses_to_skip -= 1\n else:\n if releases_to_skip == 0:\n mouse_controller.release(button)\n \n if in_click_sequence:\n keyboard_controller.press(Key.shift)\n mouse_controller.click(Button.left)\n keyboard_controller.release(Key.shift)\n in_click_sequence = False\n else:\n releases_to_skip -= 1\n\n elif event[\"action\"] == \"scroll\":\n if metadata[\"system\"] == \"Windows\":\n # for some reason on windows, pynput scroll is correct but pyautogui is not\n mouse_controller.scroll(metadata[\"scroll_direction\"] * event[\"dx\"], metadata[\"scroll_direction\"] * event[\"dy\"])\n else:\n pyautogui.hscroll(clicks=metadata[\"scroll_direction\"] * event[\"dx\"])\n pyautogui.vscroll(clicks=metadata[\"scroll_direction\"] * event[\"dy\"])\n\n elif event[\"action\"] in [\"press\", \"release\"]:\n key = name_to_key(event[\"name\"])\n if event[\"action\"] == \"press\":\n keyboard_controller.press(key)\n else:\n keyboard_controller.release(key)\n \n # sleep for the correct amount of time\n \n end_time = time.perf_counter()\n execution_time = end_time - start_time\n\n if i + 1 < len(events):\n desired_delay = events[i + 1][\"time_stamp\"] - event[\"time_stamp\"]\n delay = desired_delay - execution_time\n if delay < 0:\n print(f\"warning: behind by {-delay * 1000:.3f} ms\")\n elif delay != 0:\n wait_until = time.perf_counter() + delay\n while time.perf_counter() < wait_until:\n pass\n \n self.listener.stop()"
},
{
"identifier": "get_latest_recording",
"path": "ducktrack/playback.py",
"snippet": "def get_latest_recording() -> str:\n recordings_dir = get_recordings_dir()\n if not os.path.exists(recordings_dir):\n raise Exception(\"The recordings directory does not exist\")\n \n recordings = [os.path.join(recordings_dir, f) for f in os.listdir(recordings_dir) if os.path.isdir(os.path.join(recordings_dir, f))]\n \n if len(recordings) == 0:\n raise Exception(\"You have no recordings to play back\")\n \n latest_recording = max(recordings, key=os.path.getctime)\n\n return latest_recording"
},
{
"identifier": "Recorder",
"path": "ducktrack/recorder.py",
"snippet": "class Recorder(QThread):\n \"\"\"\n Makes recordings.\n \"\"\"\n \n recording_stopped = pyqtSignal()\n\n def __init__(self, natural_scrolling: bool):\n super().__init__()\n \n if system() == \"Windows\":\n fix_windows_dpi_scaling()\n \n self.recording_path = self._get_recording_path()\n \n self._is_recording = False\n self._is_paused = False\n \n self.event_queue = Queue()\n self.events_file = open(os.path.join(self.recording_path, \"events.jsonl\"), \"a\")\n \n self.metadata_manager = MetadataManager(\n recording_path=self.recording_path, \n natural_scrolling=natural_scrolling\n )\n self.obs_client = OBSClient(recording_path=self.recording_path, \n metadata=self.metadata_manager.metadata)\n\n self.mouse_listener = mouse.Listener(\n on_move=self.on_move,\n on_click=self.on_click,\n on_scroll=self.on_scroll)\n \n self.keyboard_listener = keyboard.Listener(\n on_press=self.on_press, \n on_release=self.on_release)\n \n def on_move(self, x, y):\n if not self._is_paused:\n self.event_queue.put({\"time_stamp\": time.perf_counter(), \n \"action\": \"move\", \n \"x\": x, \n \"y\": y}, block=False)\n \n def on_click(self, x, y, button, pressed):\n if not self._is_paused:\n self.event_queue.put({\"time_stamp\": time.perf_counter(), \n \"action\": \"click\", \n \"x\": x, \n \"y\": y, \n \"button\": button.name, \n \"pressed\": pressed}, block=False)\n \n def on_scroll(self, x, y, dx, dy):\n if not self._is_paused:\n self.event_queue.put({\"time_stamp\": time.perf_counter(), \n \"action\": \"scroll\", \n \"x\": x, \n \"y\": y, \n \"dx\": dx, \n \"dy\": dy}, block=False)\n \n def on_press(self, key):\n if not self._is_paused:\n self.event_queue.put({\"time_stamp\": time.perf_counter(), \n \"action\": \"press\", \n \"name\": key.char if type(key) == KeyCode else key.name}, block=False)\n\n def on_release(self, key):\n if not self._is_paused:\n self.event_queue.put({\"time_stamp\": time.perf_counter(), \n \"action\": \"release\", \n \"name\": key.char if type(key) == KeyCode else key.name}, block=False)\n\n def run(self):\n self._is_recording = True\n \n self.metadata_manager.collect()\n self.obs_client.start_recording()\n \n self.mouse_listener.start()\n self.keyboard_listener.start()\n \n while self._is_recording:\n event = self.event_queue.get()\n self.events_file.write(json.dumps(event) + \"\\n\")\n\n def stop_recording(self):\n if self._is_recording:\n self._is_recording = False\n\n self.metadata_manager.end_collect()\n \n self.mouse_listener.stop()\n self.keyboard_listener.stop()\n \n self.obs_client.stop_recording()\n self.metadata_manager.add_obs_record_state_timings(self.obs_client.record_state_events)\n self.events_file.close()\n self.metadata_manager.save_metadata()\n \n self.recording_stopped.emit()\n \n def pause_recording(self):\n if not self._is_paused and self._is_recording:\n self._is_paused = True\n self.obs_client.pause_recording()\n self.event_queue.put({\"time_stamp\": time.perf_counter(),\n \"action\": \"pause\"}, block=False)\n\n def resume_recording(self):\n if self._is_paused and self._is_recording:\n self._is_paused = False\n self.obs_client.resume_recording()\n self.event_queue.put({\"time_stamp\": time.perf_counter(),\n \"action\": \"resume\"}, block=False)\n\n def _get_recording_path(self) -> str:\n recordings_dir = get_recordings_dir()\n\n if not os.path.exists(recordings_dir):\n os.mkdir(recordings_dir)\n\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n \n recording_path = os.path.join(recordings_dir, f\"recording-{current_time}\")\n os.mkdir(recording_path)\n\n return recording_path"
},
{
"identifier": "get_recordings_dir",
"path": "ducktrack/util.py",
"snippet": "def get_recordings_dir() -> str:\n documents_folder = Path.home() / 'Documents' / 'DuckTrack_Recordings'\n return str(documents_folder)"
},
{
"identifier": "open_file",
"path": "ducktrack/util.py",
"snippet": "def open_file(path):\n if platform.system() == \"Windows\":\n os.startfile(path)\n elif platform.system() == \"Darwin\":\n subprocess.Popen([\"open\", path])\n else:\n subprocess.Popen([\"xdg-open\", path])"
}
] | import os
import sys
from platform import system
from PyQt6.QtCore import QTimer, pyqtSlot
from PyQt6.QtGui import QAction, QIcon
from PyQt6.QtWidgets import (QApplication, QCheckBox, QDialog, QFileDialog,
QFormLayout, QLabel, QLineEdit, QMenu,
QMessageBox, QPushButton, QSystemTrayIcon,
QTextEdit, QVBoxLayout, QWidget)
from .obs_client import close_obs, is_obs_running, open_obs
from .playback import Player, get_latest_recording
from .recorder import Recorder
from .util import get_recordings_dir, open_file | 3,919 | self.setWindowTitle("Recording Details")
layout = QVBoxLayout(self)
self.form_layout = QFormLayout()
self.title_label = QLabel("Title:")
self.title_input = QLineEdit(self)
self.form_layout.addRow(self.title_label, self.title_input)
self.description_label = QLabel("Description:")
self.description_input = QTextEdit(self)
self.form_layout.addRow(self.description_label, self.description_input)
layout.addLayout(self.form_layout)
self.submit_button = QPushButton("Save", self)
self.submit_button.clicked.connect(self.accept)
layout.addWidget(self.submit_button)
def get_values(self):
return self.title_input.text(), self.description_input.toPlainText()
class MainInterface(QWidget):
def __init__(self, app: QApplication):
super().__init__()
self.tray = QSystemTrayIcon(QIcon(resource_path("assets/duck.png")))
self.tray.show()
self.app = app
self.init_tray()
self.init_window()
if not is_obs_running():
self.obs_process = open_obs()
def init_window(self):
self.setWindowTitle("DuckTrack")
layout = QVBoxLayout(self)
self.toggle_record_button = QPushButton("Start Recording", self)
self.toggle_record_button.clicked.connect(self.toggle_record)
layout.addWidget(self.toggle_record_button)
self.toggle_pause_button = QPushButton("Pause Recording", self)
self.toggle_pause_button.clicked.connect(self.toggle_pause)
self.toggle_pause_button.setEnabled(False)
layout.addWidget(self.toggle_pause_button)
self.show_recordings_button = QPushButton("Show Recordings", self)
self.show_recordings_button.clicked.connect(lambda: open_file(get_recordings_dir()))
layout.addWidget(self.show_recordings_button)
self.play_latest_button = QPushButton("Play Latest Recording", self)
self.play_latest_button.clicked.connect(self.play_latest_recording)
layout.addWidget(self.play_latest_button)
self.play_custom_button = QPushButton("Play Custom Recording", self)
self.play_custom_button.clicked.connect(self.play_custom_recording)
layout.addWidget(self.play_custom_button)
self.replay_recording_button = QPushButton("Replay Recording", self)
self.replay_recording_button.clicked.connect(self.replay_recording)
self.replay_recording_button.setEnabled(False)
layout.addWidget(self.replay_recording_button)
self.quit_button = QPushButton("Quit", self)
self.quit_button.clicked.connect(self.quit)
layout.addWidget(self.quit_button)
self.natural_scrolling_checkbox = QCheckBox("Natural Scrolling", self, checked=system() == "Darwin")
layout.addWidget(self.natural_scrolling_checkbox)
self.natural_scrolling_checkbox.stateChanged.connect(self.toggle_natural_scrolling)
self.setLayout(layout)
def init_tray(self):
self.menu = QMenu()
self.tray.setContextMenu(self.menu)
self.toggle_record_action = QAction("Start Recording")
self.toggle_record_action.triggered.connect(self.toggle_record)
self.menu.addAction(self.toggle_record_action)
self.toggle_pause_action = QAction("Pause Recording")
self.toggle_pause_action.triggered.connect(self.toggle_pause)
self.toggle_pause_action.setVisible(False)
self.menu.addAction(self.toggle_pause_action)
self.show_recordings_action = QAction("Show Recordings")
self.show_recordings_action.triggered.connect(lambda: open_file(get_recordings_dir()))
self.menu.addAction(self.show_recordings_action)
self.play_latest_action = QAction("Play Latest Recording")
self.play_latest_action.triggered.connect(self.play_latest_recording)
self.menu.addAction(self.play_latest_action)
self.play_custom_action = QAction("Play Custom Recording")
self.play_custom_action.triggered.connect(self.play_custom_recording)
self.menu.addAction(self.play_custom_action)
self.replay_recording_action = QAction("Replay Recording")
self.replay_recording_action.triggered.connect(self.replay_recording)
self.menu.addAction(self.replay_recording_action)
self.replay_recording_action.setVisible(False)
self.quit_action = QAction("Quit")
self.quit_action.triggered.connect(self.quit)
self.menu.addAction(self.quit_action)
self.menu.addSeparator()
self.natural_scrolling_option = QAction("Natural Scrolling", checkable=True, checked=system() == "Darwin")
self.natural_scrolling_option.triggered.connect(self.toggle_natural_scrolling)
self.menu.addAction(self.natural_scrolling_option)
@pyqtSlot()
def replay_recording(self):
|
class TitleDescriptionDialog(QDialog):
def __init__(self, parent=None):
super().__init__(parent)
self.setWindowTitle("Recording Details")
layout = QVBoxLayout(self)
self.form_layout = QFormLayout()
self.title_label = QLabel("Title:")
self.title_input = QLineEdit(self)
self.form_layout.addRow(self.title_label, self.title_input)
self.description_label = QLabel("Description:")
self.description_input = QTextEdit(self)
self.form_layout.addRow(self.description_label, self.description_input)
layout.addLayout(self.form_layout)
self.submit_button = QPushButton("Save", self)
self.submit_button.clicked.connect(self.accept)
layout.addWidget(self.submit_button)
def get_values(self):
return self.title_input.text(), self.description_input.toPlainText()
class MainInterface(QWidget):
def __init__(self, app: QApplication):
super().__init__()
self.tray = QSystemTrayIcon(QIcon(resource_path("assets/duck.png")))
self.tray.show()
self.app = app
self.init_tray()
self.init_window()
if not is_obs_running():
self.obs_process = open_obs()
def init_window(self):
self.setWindowTitle("DuckTrack")
layout = QVBoxLayout(self)
self.toggle_record_button = QPushButton("Start Recording", self)
self.toggle_record_button.clicked.connect(self.toggle_record)
layout.addWidget(self.toggle_record_button)
self.toggle_pause_button = QPushButton("Pause Recording", self)
self.toggle_pause_button.clicked.connect(self.toggle_pause)
self.toggle_pause_button.setEnabled(False)
layout.addWidget(self.toggle_pause_button)
self.show_recordings_button = QPushButton("Show Recordings", self)
self.show_recordings_button.clicked.connect(lambda: open_file(get_recordings_dir()))
layout.addWidget(self.show_recordings_button)
self.play_latest_button = QPushButton("Play Latest Recording", self)
self.play_latest_button.clicked.connect(self.play_latest_recording)
layout.addWidget(self.play_latest_button)
self.play_custom_button = QPushButton("Play Custom Recording", self)
self.play_custom_button.clicked.connect(self.play_custom_recording)
layout.addWidget(self.play_custom_button)
self.replay_recording_button = QPushButton("Replay Recording", self)
self.replay_recording_button.clicked.connect(self.replay_recording)
self.replay_recording_button.setEnabled(False)
layout.addWidget(self.replay_recording_button)
self.quit_button = QPushButton("Quit", self)
self.quit_button.clicked.connect(self.quit)
layout.addWidget(self.quit_button)
self.natural_scrolling_checkbox = QCheckBox("Natural Scrolling", self, checked=system() == "Darwin")
layout.addWidget(self.natural_scrolling_checkbox)
self.natural_scrolling_checkbox.stateChanged.connect(self.toggle_natural_scrolling)
self.setLayout(layout)
def init_tray(self):
self.menu = QMenu()
self.tray.setContextMenu(self.menu)
self.toggle_record_action = QAction("Start Recording")
self.toggle_record_action.triggered.connect(self.toggle_record)
self.menu.addAction(self.toggle_record_action)
self.toggle_pause_action = QAction("Pause Recording")
self.toggle_pause_action.triggered.connect(self.toggle_pause)
self.toggle_pause_action.setVisible(False)
self.menu.addAction(self.toggle_pause_action)
self.show_recordings_action = QAction("Show Recordings")
self.show_recordings_action.triggered.connect(lambda: open_file(get_recordings_dir()))
self.menu.addAction(self.show_recordings_action)
self.play_latest_action = QAction("Play Latest Recording")
self.play_latest_action.triggered.connect(self.play_latest_recording)
self.menu.addAction(self.play_latest_action)
self.play_custom_action = QAction("Play Custom Recording")
self.play_custom_action.triggered.connect(self.play_custom_recording)
self.menu.addAction(self.play_custom_action)
self.replay_recording_action = QAction("Replay Recording")
self.replay_recording_action.triggered.connect(self.replay_recording)
self.menu.addAction(self.replay_recording_action)
self.replay_recording_action.setVisible(False)
self.quit_action = QAction("Quit")
self.quit_action.triggered.connect(self.quit)
self.menu.addAction(self.quit_action)
self.menu.addSeparator()
self.natural_scrolling_option = QAction("Natural Scrolling", checkable=True, checked=system() == "Darwin")
self.natural_scrolling_option.triggered.connect(self.toggle_natural_scrolling)
self.menu.addAction(self.natural_scrolling_option)
@pyqtSlot()
def replay_recording(self): | player = Player() | 3 | 2023-10-18 19:34:19+00:00 | 8k |
e4s2023/E4S2023 | swap_face_fine/face_vid2vid/modules/generator.py | [
{
"identifier": "ResBlock2d",
"path": "swap_face_fine/face_vid2vid/modules/util.py",
"snippet": "class ResBlock2d(nn.Module):\n \"\"\"\n Res block, preserve spatial resolution.\n \"\"\"\n\n def __init__(self, in_features, kernel_size, padding):\n super(ResBlock2d, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,\n padding=padding)\n self.conv2 = nn.Conv2d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,\n padding=padding)\n self.norm1 = BatchNorm2d(in_features, affine=True)\n self.norm2 = BatchNorm2d(in_features, affine=True)\n\n def forward(self, x):\n out = self.norm1(x)\n out = F.relu(out)\n out = self.conv1(out)\n out = self.norm2(out)\n out = F.relu(out)\n out = self.conv2(out)\n out += x\n return out"
},
{
"identifier": "SameBlock2d",
"path": "swap_face_fine/face_vid2vid/modules/util.py",
"snippet": "class SameBlock2d(nn.Module):\n \"\"\"\n Simple block, preserve spatial resolution.\n \"\"\"\n\n def __init__(self, in_features, out_features, groups=1, kernel_size=3, padding=1, lrelu=False):\n super(SameBlock2d, self).__init__()\n self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features,\n kernel_size=kernel_size, padding=padding, groups=groups)\n self.norm = BatchNorm2d(out_features, affine=True)\n if lrelu:\n self.ac = nn.LeakyReLU()\n else:\n self.ac = nn.ReLU()\n\n def forward(self, x):\n out = self.conv(x)\n out = self.norm(out)\n out = self.ac(out)\n return out"
},
{
"identifier": "UpBlock2d",
"path": "swap_face_fine/face_vid2vid/modules/util.py",
"snippet": "class UpBlock2d(nn.Module):\n \"\"\"\n Upsampling block for use in decoder.\n \"\"\"\n\n def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):\n super(UpBlock2d, self).__init__()\n\n self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,\n padding=padding, groups=groups)\n self.norm = BatchNorm2d(out_features, affine=True)\n\n def forward(self, x):\n out = F.interpolate(x, scale_factor=2)\n out = self.conv(out)\n out = self.norm(out)\n out = F.relu(out)\n return out"
},
{
"identifier": "DownBlock2d",
"path": "swap_face_fine/face_vid2vid/modules/util.py",
"snippet": "class DownBlock2d(nn.Module):\n \"\"\"\n Downsampling block for use in encoder.\n \"\"\"\n\n def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):\n super(DownBlock2d, self).__init__()\n self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,\n padding=padding, groups=groups)\n self.norm = BatchNorm2d(out_features, affine=True)\n self.pool = nn.AvgPool2d(kernel_size=(2, 2))\n\n def forward(self, x):\n out = self.conv(x)\n out = self.norm(out)\n out = F.relu(out)\n out = self.pool(out)\n return out"
},
{
"identifier": "ResBlock3d",
"path": "swap_face_fine/face_vid2vid/modules/util.py",
"snippet": "class ResBlock3d(nn.Module):\n \"\"\"\n Res block, preserve spatial resolution.\n \"\"\"\n\n def __init__(self, in_features, kernel_size, padding):\n super(ResBlock3d, self).__init__()\n self.conv1 = nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,\n padding=padding)\n self.conv2 = nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,\n padding=padding)\n self.norm1 = BatchNorm3d(in_features, affine=True)\n self.norm2 = BatchNorm3d(in_features, affine=True)\n\n def forward(self, x):\n out = self.norm1(x)\n out = F.relu(out)\n out = self.conv1(out)\n out = self.norm2(out)\n out = F.relu(out)\n out = self.conv2(out)\n out += x\n return out"
},
{
"identifier": "SPADEResnetBlock",
"path": "swap_face_fine/face_vid2vid/modules/util.py",
"snippet": "class SPADEResnetBlock(nn.Module):\n def __init__(self, fin, fout, norm_G, label_nc, use_se=False, dilation=1):\n super().__init__()\n # Attributes\n self.learned_shortcut = (fin != fout)\n fmiddle = min(fin, fout)\n self.use_se = use_se\n # create conv layers\n self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=dilation, dilation=dilation)\n self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=dilation, dilation=dilation)\n if self.learned_shortcut:\n self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)\n # apply spectral norm if specified\n if 'spectral' in norm_G:\n self.conv_0 = spectral_norm(self.conv_0)\n self.conv_1 = spectral_norm(self.conv_1)\n if self.learned_shortcut:\n self.conv_s = spectral_norm(self.conv_s)\n # define normalization layers\n self.norm_0 = SPADE(fin, label_nc)\n self.norm_1 = SPADE(fmiddle, label_nc)\n if self.learned_shortcut:\n self.norm_s = SPADE(fin, label_nc)\n\n def forward(self, x, seg1):\n x_s = self.shortcut(x, seg1)\n dx = self.conv_0(self.actvn(self.norm_0(x, seg1)))\n dx = self.conv_1(self.actvn(self.norm_1(dx, seg1)))\n out = x_s + dx\n return out\n\n def shortcut(self, x, seg1):\n if self.learned_shortcut:\n x_s = self.conv_s(self.norm_s(x, seg1))\n else:\n x_s = x\n return x_s\n\n def actvn(self, x):\n return F.leaky_relu(x, 2e-1)"
},
{
"identifier": "DenseMotionNetwork",
"path": "swap_face_fine/face_vid2vid/modules/dense_motion.py",
"snippet": "class DenseMotionNetwork(nn.Module):\n \"\"\"\n Module that predicting a dense motion from sparse motion representation given by kp_source and kp_driving\n \"\"\"\n\n def __init__(self, block_expansion, num_blocks, max_features, num_kp, feature_channel, reshape_depth, compress,\n estimate_occlusion_map=False):\n super(DenseMotionNetwork, self).__init__()\n # self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_kp+1)*(feature_channel+1), max_features=max_features, num_blocks=num_blocks)\n self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_kp+1)*(compress+1), max_features=max_features, num_blocks=num_blocks)\n\n self.mask = nn.Conv3d(self.hourglass.out_filters, num_kp + 1, kernel_size=7, padding=3)\n\n self.compress = nn.Conv3d(feature_channel, compress, kernel_size=1)\n self.norm = BatchNorm3d(compress, affine=True)\n\n if estimate_occlusion_map:\n # self.occlusion = nn.Conv2d(reshape_channel*reshape_depth, 1, kernel_size=7, padding=3)\n self.occlusion = nn.Conv2d(self.hourglass.out_filters*reshape_depth, 1, kernel_size=7, padding=3)\n else:\n self.occlusion = None\n\n self.num_kp = num_kp\n\n\n def create_sparse_motions(self, feature, kp_driving, kp_source):\n bs, _, d, h, w = feature.shape\n identity_grid = make_coordinate_grid((d, h, w), type=kp_source['value'].type())\n identity_grid = identity_grid.view(1, 1, d, h, w, 3)\n coordinate_grid = identity_grid - kp_driving['value'].view(bs, self.num_kp, 1, 1, 1, 3)\n \n k = coordinate_grid.shape[1]\n \n # if 'jacobian' in kp_driving:\n if 'jacobian' in kp_driving and kp_driving['jacobian'] is not None:\n jacobian = torch.matmul(kp_source['jacobian'], torch.inverse(kp_driving['jacobian']))\n jacobian = jacobian.unsqueeze(-3).unsqueeze(-3).unsqueeze(-3)\n jacobian = jacobian.repeat(1, 1, d, h, w, 1, 1)\n coordinate_grid = torch.matmul(jacobian, coordinate_grid.unsqueeze(-1))\n coordinate_grid = coordinate_grid.squeeze(-1)\n '''\n if 'rot' in kp_driving:\n rot_s = kp_source['rot']\n rot_d = kp_driving['rot']\n rot = torch.einsum('bij, bjk->bki', rot_s, torch.inverse(rot_d))\n rot = rot.unsqueeze(-3).unsqueeze(-3).unsqueeze(-3).unsqueeze(-3)\n rot = rot.repeat(1, k, d, h, w, 1, 1)\n # print(rot.shape)\n coordinate_grid = torch.matmul(rot, coordinate_grid.unsqueeze(-1))\n coordinate_grid = coordinate_grid.squeeze(-1)\n # print(coordinate_grid.shape)\n '''\n driving_to_source = coordinate_grid + kp_source['value'].view(bs, self.num_kp, 1, 1, 1, 3) # (bs, num_kp, d, h, w, 3)\n\n #adding background feature\n identity_grid = identity_grid.repeat(bs, 1, 1, 1, 1, 1)\n sparse_motions = torch.cat([identity_grid, driving_to_source], dim=1)\n \n # sparse_motions = driving_to_source\n\n return sparse_motions\n\n def create_deformed_feature(self, feature, sparse_motions):\n bs, _, d, h, w = feature.shape\n feature_repeat = feature.unsqueeze(1).unsqueeze(1).repeat(1, self.num_kp+1, 1, 1, 1, 1, 1) # (bs, num_kp+1, 1, c, d, h, w)\n feature_repeat = feature_repeat.view(bs * (self.num_kp+1), -1, d, h, w) # (bs*(num_kp+1), c, d, h, w)\n sparse_motions = sparse_motions.view((bs * (self.num_kp+1), d, h, w, -1)) # (bs*(num_kp+1), d, h, w, 3)\n sparse_deformed = F.grid_sample(feature_repeat, sparse_motions)\n sparse_deformed = sparse_deformed.view((bs, self.num_kp+1, -1, d, h, w)) # (bs, num_kp+1, c, d, h, w)\n return sparse_deformed\n\n def create_heatmap_representations(self, feature, kp_driving, kp_source):\n spatial_size = feature.shape[3:]\n gaussian_driving = kp2gaussian(kp_driving, spatial_size=spatial_size, kp_variance=0.01)\n gaussian_source = kp2gaussian(kp_source, spatial_size=spatial_size, kp_variance=0.01)\n heatmap = gaussian_driving - gaussian_source\n\n # adding background feature\n zeros = torch.zeros(heatmap.shape[0], 1, spatial_size[0], spatial_size[1], spatial_size[2]).type(heatmap.type())\n heatmap = torch.cat([zeros, heatmap], dim=1)\n heatmap = heatmap.unsqueeze(2) # (bs, num_kp+1, 1, d, h, w)\n return heatmap\n\n def forward(self, feature, kp_driving, kp_source):\n bs, _, d, h, w = feature.shape\n\n feature = self.compress(feature)\n feature = self.norm(feature)\n feature = F.relu(feature)\n\n out_dict = dict()\n sparse_motion = self.create_sparse_motions(feature, kp_driving, kp_source)\n deformed_feature = self.create_deformed_feature(feature, sparse_motion)\n\n heatmap = self.create_heatmap_representations(deformed_feature, kp_driving, kp_source)\n\n input = torch.cat([heatmap, deformed_feature], dim=2)\n input = input.view(bs, -1, d, h, w)\n\n # input = deformed_feature.view(bs, -1, d, h, w) # (bs, num_kp+1 * c, d, h, w)\n\n prediction = self.hourglass(input)\n\n mask = self.mask(prediction)\n mask = F.softmax(mask, dim=1)\n out_dict['mask'] = mask\n mask = mask.unsqueeze(2) # (bs, num_kp+1, 1, d, h, w)\n sparse_motion = sparse_motion.permute(0, 1, 5, 2, 3, 4) # (bs, num_kp+1, 3, d, h, w)\n deformation = (sparse_motion * mask).sum(dim=1) # (bs, 3, d, h, w)\n deformation = deformation.permute(0, 2, 3, 4, 1) # (bs, d, h, w, 3)\n\n out_dict['deformation'] = deformation\n\n if self.occlusion:\n bs, c, d, h, w = prediction.shape\n prediction = prediction.view(bs, -1, h, w)\n occlusion_map = torch.sigmoid(self.occlusion(prediction))\n out_dict['occlusion_map'] = occlusion_map\n\n return out_dict"
}
] | import torch
import torch.nn.functional as F
from torch import nn
from swap_face_fine.face_vid2vid.modules.util import ResBlock2d, SameBlock2d, UpBlock2d, DownBlock2d, ResBlock3d, SPADEResnetBlock
from swap_face_fine.face_vid2vid.modules.dense_motion import DenseMotionNetwork | 4,015 |
class OcclusionAwareGenerator(nn.Module):
"""
Generator follows NVIDIA architecture.
"""
def __init__(self, image_channel, feature_channel, num_kp, block_expansion, max_features, num_down_blocks, reshape_channel, reshape_depth,
num_resblocks, estimate_occlusion_map=False, dense_motion_params=None, estimate_jacobian=False):
super(OcclusionAwareGenerator, self).__init__()
if dense_motion_params is not None:
self.dense_motion_network = DenseMotionNetwork(num_kp=num_kp, feature_channel=feature_channel,
estimate_occlusion_map=estimate_occlusion_map,
**dense_motion_params)
else:
self.dense_motion_network = None
self.first = SameBlock2d(image_channel, block_expansion, kernel_size=(7, 7), padding=(3, 3))
down_blocks = []
for i in range(num_down_blocks):
in_features = min(max_features, block_expansion * (2 ** i))
out_features = min(max_features, block_expansion * (2 ** (i + 1)))
down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
self.down_blocks = nn.ModuleList(down_blocks)
self.second = nn.Conv2d(in_channels=out_features, out_channels=max_features, kernel_size=1, stride=1)
self.reshape_channel = reshape_channel
self.reshape_depth = reshape_depth
self.resblocks_3d = torch.nn.Sequential()
for i in range(num_resblocks):
self.resblocks_3d.add_module('3dr' + str(i), ResBlock3d(reshape_channel, kernel_size=3, padding=1))
out_features = block_expansion * (2 ** (num_down_blocks))
self.third = SameBlock2d(max_features, out_features, kernel_size=(3, 3), padding=(1, 1), lrelu=True)
self.fourth = nn.Conv2d(in_channels=out_features, out_channels=out_features, kernel_size=1, stride=1)
self.resblocks_2d = torch.nn.Sequential()
for i in range(num_resblocks):
self.resblocks_2d.add_module('2dr' + str(i), ResBlock2d(out_features, kernel_size=3, padding=1))
up_blocks = []
for i in range(num_down_blocks):
in_features = max(block_expansion, block_expansion * (2 ** (num_down_blocks - i)))
out_features = max(block_expansion, block_expansion * (2 ** (num_down_blocks - i - 1)))
|
class OcclusionAwareGenerator(nn.Module):
"""
Generator follows NVIDIA architecture.
"""
def __init__(self, image_channel, feature_channel, num_kp, block_expansion, max_features, num_down_blocks, reshape_channel, reshape_depth,
num_resblocks, estimate_occlusion_map=False, dense_motion_params=None, estimate_jacobian=False):
super(OcclusionAwareGenerator, self).__init__()
if dense_motion_params is not None:
self.dense_motion_network = DenseMotionNetwork(num_kp=num_kp, feature_channel=feature_channel,
estimate_occlusion_map=estimate_occlusion_map,
**dense_motion_params)
else:
self.dense_motion_network = None
self.first = SameBlock2d(image_channel, block_expansion, kernel_size=(7, 7), padding=(3, 3))
down_blocks = []
for i in range(num_down_blocks):
in_features = min(max_features, block_expansion * (2 ** i))
out_features = min(max_features, block_expansion * (2 ** (i + 1)))
down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
self.down_blocks = nn.ModuleList(down_blocks)
self.second = nn.Conv2d(in_channels=out_features, out_channels=max_features, kernel_size=1, stride=1)
self.reshape_channel = reshape_channel
self.reshape_depth = reshape_depth
self.resblocks_3d = torch.nn.Sequential()
for i in range(num_resblocks):
self.resblocks_3d.add_module('3dr' + str(i), ResBlock3d(reshape_channel, kernel_size=3, padding=1))
out_features = block_expansion * (2 ** (num_down_blocks))
self.third = SameBlock2d(max_features, out_features, kernel_size=(3, 3), padding=(1, 1), lrelu=True)
self.fourth = nn.Conv2d(in_channels=out_features, out_channels=out_features, kernel_size=1, stride=1)
self.resblocks_2d = torch.nn.Sequential()
for i in range(num_resblocks):
self.resblocks_2d.add_module('2dr' + str(i), ResBlock2d(out_features, kernel_size=3, padding=1))
up_blocks = []
for i in range(num_down_blocks):
in_features = max(block_expansion, block_expansion * (2 ** (num_down_blocks - i)))
out_features = max(block_expansion, block_expansion * (2 ** (num_down_blocks - i - 1))) | up_blocks.append(UpBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1))) | 2 | 2023-10-15 12:15:01+00:00 | 8k |
lancopku/label-words-are-anchors | icl/analysis/compress_top.py | [
{
"identifier": "LMForwardAPI",
"path": "icl/lm_apis/lm_api_base.py",
"snippet": "class LMForwardAPI(nn.Module):\n def __init__(self, model, model_name, tokenizer, label_dict: Dict[int, str], device='cuda:0'):\n super().__init__()\n self._use_past_key_values = False\n self._past_key_values = None\n self.model = model\n self.model_name = model_name\n self.tokenizer = tokenizer\n self.device = device\n self.model.eval()\n self.calibration_probs = None\n self.use_calibration_probs = False\n self.probs_from_results_fn = None\n self.results_args: dict = {}\n self.label_map = {tokenizer.encode(v, add_special_tokens=False)[0]: k for k, v in\n label_dict.items()}\n self.position_offset = 0\n\n assert model_name in ['gpt2-xl', 'gpt-j-6b']\n\n @property\n def device(self):\n return self.model.device\n\n @device.setter\n def device(self, device):\n print(f'LMForwardAPI: set device to {device}')\n self.model = self.model.to(device)\n if self.past_key_values:\n self.past_key_values = self.past_key_values # will reset device\n\n def cal_logits(self, inputs, **kwargs):\n self.model.eval()\n inputs = dict_to(inputs, self.device)\n\n if self.use_past_key_values:\n past_key_values = self.get_past_key_values(inputs)\n kwargs['past_key_values'] = past_key_values\n inputs['attention_mask'] = self.get_mask_with_past_key_values(inputs['attention_mask'])\n if self.model_name in ['gpt-j-6b','gpt2-xl']:\n bsz, sql = inputs['input_ids'].shape\n position_ids = torch.arange(sql, dtype=torch.long, device=self.device).repeat(bsz, 1)\n position_ids = position_ids + self.position_offset\n kwargs['position_ids'] = position_ids\n\n results = self.model(\n input_ids=inputs['input_ids'],\n attention_mask=inputs['attention_mask'],\n **kwargs,\n )\n logits = results['logits']\n # find last position before pad tokens\n input_ids = inputs['input_ids']\n eos_token_id: int = self.tokenizer.eos_token_id\n is_not_eos = input_ids != eos_token_id\n prediction_pos = is_not_eos.sum(dim=1) - 1\n is_not_eos = is_not_eos.float()\n # check all eos_tokens are at the end\n assert (is_not_eos[:, :-1] - is_not_eos[:, 1:] >= 0).all()\n # get logits for the last position\n logits = logits[torch.arange(input_ids.shape[0]), prediction_pos, :]\n return logits, results\n\n def _cal_probs(self, logits):\n interest_index = list(self.label_map.keys())\n logits = logits[:, interest_index]\n probs = F.softmax(logits, dim=-1)\n if self.use_calibration_probs:\n assert self.calibration_probs is not None\n probs = probs / self.calibration_probs\n return probs, logits\n\n def cal_probs(self, inputs, **kwargs):\n logits, results = self.cal_logits(inputs, **kwargs)\n probs, logits = self._cal_probs(logits)\n return probs, logits, results\n\n def cal_probs_from_results(self, inputs, results):\n return self.probs_from_results_fn(inputs, results)\n\n @property\n def past_key_values(self):\n return self._past_key_values\n\n @past_key_values.setter\n def past_key_values(self, past_key_values):\n if past_key_values is not None:\n assert isinstance(past_key_values, tuple)\n assert isinstance(past_key_values[0], tuple)\n assert len(past_key_values[0]) == 2\n assert isinstance(past_key_values[0][0], torch.Tensor)\n assert past_key_values[0][0].shape[0] == 1\n self._past_key_values = tuple(\n tuple(t.to(self.device) for t in tup) for tup in past_key_values)\n else:\n self._past_key_values = None\n\n @property\n def use_past_key_values(self):\n return self._use_past_key_values\n\n @use_past_key_values.setter\n def use_past_key_values(self, use_past_key_values):\n self._use_past_key_values = use_past_key_values\n\n def get_mask_with_past_key_values(self, mask):\n if self.past_key_values is None:\n raise ValueError('past_key_values is None, please set it first')\n batch_size = mask.shape[0]\n past_key_values_len = self.past_key_values[0][0].shape[2]\n mask = torch.cat(\n [torch.ones(batch_size, past_key_values_len, dtype=torch.bool, device=self.device),\n mask], dim=1)\n return mask\n\n def get_past_key_values(self, inputs):\n if self.past_key_values is None:\n raise ValueError('past_key_values is None, please set it first')\n batch_size = inputs['input_ids'].shape[0]\n past_key_values = ()\n for layer_key, layer_value in self.past_key_values:\n past_key_values += (\n layer_key.expand(batch_size, -1, -1, -1),\n layer_value.expand(batch_size, -1, -1, -1)),\n\n return past_key_values\n\n @torch.no_grad()\n def forward_no_grad(self, inputs):\n ori_logits, results = self.cal_logits(inputs, **self.results_args)\n probs, logits = self._cal_probs(ori_logits)\n probs_from_results = self.cal_probs_from_results(inputs, results)\n probs_from_results['ori_logits'] = ori_logits\n return probs, probs_from_results\n\n def forward(self, **kwargs):\n ori_logits, results = self.cal_logits(kwargs, **self.results_args)\n probs, logits = self._cal_probs(ori_logits)\n result = {'probs': probs, 'logits': logits, 'results': results}\n if self.probs_from_results_fn:\n probs_from_results = self.cal_probs_from_results(kwargs, results)\n result['probs_from_results'] = probs_from_results\n result['ori_logits'] = ori_logits\n return result"
},
{
"identifier": "ContextSolver",
"path": "icl/util_classes/context_solver.py",
"snippet": "class ContextSolver:\n def __init__(self, task_name, tokenizer=None):\n assert task_name in ['sst2', 'trec', 'agnews', 'emo']\n self.task_name = task_name\n self.tokenizer = tokenizer\n self.format_s = format_s_dict[task_name]\n self.parse_format_s()\n\n def parse_format_s(self):\n self.X_prefix = self.format_s.split('\\n')[0].split(':')[0] + ':'\n self.Y_prefix = self.format_s.split('\\n')[1].split(':')[0] + ':'\n\n def get_empty_demo_context(self, context: str, only_demo_part=True):\n context = context.split('\\n')\n for i, line in enumerate(context[:-2]):\n if self.X_prefix in line:\n line = self.X_prefix\n elif self.Y_prefix in line:\n line = line\n else:\n raise warnings.warn('Global prefix or other str exists!')\n context[i] = line\n if only_demo_part:\n context = context[:-2]\n context = '\\n'.join(context)\n return context\n\n def get_mask_strings_and_match_before(self, context, input_ids, tokenizer=None):\n if tokenizer is None:\n tokenizer = self.tokenizer\n poss = torch.where(input_ids == tokenizer.encode('\\n', add_special_tokens=False)[0])[0]\n if len(poss) >= 2:\n match_before = poss[-2] + 1\n else:\n match_before = None\n\n list_s = []\n list_s.append(self.X_prefix)\n list_s.append('\\n' + self.X_prefix)\n context = context.split('\\n')\n for i, line in enumerate(context[:-2]):\n if self.X_prefix in line:\n pass\n elif self.Y_prefix in line:\n list_s.append('\\n' + line)\n list_s.append('\\n' + line + '\\n')\n else:\n raise warnings.warn('Global prefix or other str exists!')\n return list_s, match_before\n\n def get_mask(self, input_ids, tokenizer=None):\n if isinstance(input_ids, list):\n input_ids = torch.tensor(input_ids)\n if len(input_ids.shape) == 2:\n assert input_ids.shape[0] == 1\n input_ids = input_ids[0]\n if tokenizer is None:\n tokenizer = self.tokenizer\n context = tokenizer.decode(input_ids)\n list_s, match_before = self.get_mask_strings_and_match_before(context, input_ids=input_ids,\n tokenizer=tokenizer)\n tensor_str_finder = TensorStrFinder(tokenizer=tokenizer)\n mask = tensor_str_finder.get_strs_mask_in_tensor(list_s=list_s, t=input_ids,\n match_before=match_before)\n return mask"
},
{
"identifier": "Predictor",
"path": "icl/util_classes/predictor_classes.py",
"snippet": "class Predictor:\n def __init__(self, label_id_dict, pad_token_id, task_name, tokenizer, layer,\n naive_class_embs=None,\n naive_final_emb=None) -> None:\n self.naive_class_embs = naive_class_embs\n self.naive_final_emb = naive_final_emb\n self.label_id_dict = label_id_dict\n self.pad_token_id = pad_token_id\n self.task_name = task_name\n self.tokenizer = tokenizer\n self.layer = layer\n\n if task_name == 'sst2':\n self.prefix_idxs = [tokenizer.encode('Sentiment', add_special_tokens=False)[-1],\n tokenizer.encode(':', add_special_tokens=False)[0]]\n elif task_name == 'agnews':\n self.prefix_idxs = [tokenizer.encode('Answer', add_special_tokens=False)[-1],\n tokenizer.encode(':', add_special_tokens=False)[0]]\n elif task_name == 'trec':\n self.prefix_idxs = [tokenizer.encode(' Type', add_special_tokens=False)[-1],\n tokenizer.encode(':', add_special_tokens=False)[0]]\n elif task_name == 'emo':\n self.prefix_idxs = [tokenizer.encode('Emotion', add_special_tokens=False)[-1],\n tokenizer.encode(':', add_special_tokens=False)[0]]\n else:\n raise NotImplementedError(f\"task_name: {task_name}\")\n\n def get_pos(self, inputs):\n label_id_dict = self.label_id_dict\n pad_token_id = self.pad_token_id\n final_pos = (inputs['input_ids'] != pad_token_id).int().sum(-1) - 1\n device = inputs['input_ids'].device\n bsz, sql = inputs['input_ids'].shape\n class_poss = []\n for idx in label_id_dict.values():\n class_idx = idx\n for offset, prefix_idx in enumerate(reversed(self.prefix_idxs)):\n class_idx += prefix_idx * 100000 ** (offset + 1)\n input_ids = inputs['input_ids'].detach().clone()\n input_ids[:, 1:] += inputs['input_ids'][:, :-1] * 100000\n input_ids[:, 2:] += inputs['input_ids'][:, :-2] * 100000 * 100000\n class_pos = torch.arange(sql, device=device).unsqueeze(0).repeat(bsz, 1)[\n input_ids == class_idx].squeeze()\n class_poss.append(class_pos)\n return class_poss, final_pos\n\n def _cal_all_key_and_values_of_class(self, inputs, past_key_values, one_class_one_list=False,\n include_final=False):\n class_poss, final_pos = self.get_pos(inputs)\n\n if include_final:\n class_poss.append(final_pos)\n\n def get_vecs(ker_or_value, class_poss):\n batch_idx = torch.arange(inputs['input_ids'].shape[0])\n class_vecs = []\n for poss in class_poss:\n class_vec = ker_or_value[batch_idx, :, poss, :]\n class_vecs.append(class_vec.unsqueeze(-2))\n if not one_class_one_list:\n class_vecs = torch.cat(class_vecs, dim=-2)\n return class_vecs\n\n key_and_values = []\n for layer in range(0, self.layer):\n key_and_values.append(tuple([get_vecs(_, class_poss) for _ in past_key_values[layer]]))\n return key_and_values # tuple of tuple of tensor (bsz, n_head, num_class, d_head)\n\n def cal_all_key_and_values_of_class(self, inputs, results, one_class_one_list=False,\n include_final=False):\n past_key_values = results.past_key_values\n key_and_values = self._cal_all_key_and_values_of_class(inputs, past_key_values,\n one_class_one_list=one_class_one_list,\n include_final=include_final)\n return key_and_values # tuple of tuple of tensor (bsz, n_head, num_class, d_head)\n\n def get_attention(self, inputs, results, layer):\n class_poss, final_pos = self.get_pos(inputs)\n batch_idx = torch.arange(inputs['input_ids'].shape[0])\n scores = []\n for class_pos in class_poss:\n attention = results.attentions[layer][batch_idx, :, final_pos, class_pos]\n score = attention\n if class_pos.numel() == 1:\n score = score.sum(-1)\n else:\n score = score.sum()\n if inputs['input_ids'].shape[0] != 1:\n warnings.warn(f'Only support batch_size=1 now!')\n scores.append(score.unsqueeze(0))\n scores = torch.cat(scores, dim=0)\n return scores\n\n def cal_all_sim_attn(self, inputs, results):\n sims = []\n for layer in range(0, self.layer):\n sim = self.get_attention(inputs=inputs, results=results, layer=layer)\n sims.append(sim.unsqueeze(1))\n sims = torch.cat(sims, dim=1)\n sims = sims.reshape(inputs['input_ids'].shape[0], -1)\n return sims"
},
{
"identifier": "wrap_dataset",
"path": "icl/utils/data_wrapper.py",
"snippet": "def wrap_dataset(dataset: datasets.arrow_dataset.Dataset, demonstration, label_dict, task_name):\n def wrap(example):\n example['sentence'] = wrap_data(demonstrations=demonstration, input_sample=example,\n label_dict=label_dict, task_name=task_name)\n example['labels'] = example['label']\n return example\n\n dataset = dataset.map(wrap)\n return dataset"
},
{
"identifier": "tokenize_dataset",
"path": "icl/utils/data_wrapper.py",
"snippet": "def tokenize_dataset(dataset, tokenizer):\n def tokenize_function(examples):\n return tokenizer(examples[\"sentence\"], padding=True,\n max_length=get_max_length(tokenizer),\n truncation=True,\n return_tensors='pt')\n\n tokenized_datasets = dataset.map(tokenize_function, batched=True)\n return tokenized_datasets"
},
{
"identifier": "wrap_dataset_with_instruct",
"path": "icl/utils/data_wrapper.py",
"snippet": "def wrap_dataset_with_instruct(dataset: datasets.arrow_dataset.Dataset, instruct, label_dict,\n task_name):\n def wrap(example):\n example['sentence'] = instruct_wrapper(instruct=instruct, input_sample=example,\n label_dict=label_dict, task_name=task_name)\n example['labels'] = example['label']\n return example\n\n dataset = dataset.map(wrap)\n return dataset"
},
{
"identifier": "remove_str_columns",
"path": "icl/utils/data_wrapper.py",
"snippet": "def remove_str_columns(dataset):\n remove_keys = {k for k, v in dataset.features.items() if v.dtype == 'string'}\n dataset = dataset.remove_columns(list(remove_keys))\n return dataset"
},
{
"identifier": "load_huggingface_dataset_train_and_test",
"path": "icl/utils/load_huggingface_dataset.py",
"snippet": "def load_huggingface_dataset_train_and_test(task_name):\n dataset = None\n if task_name == 'sst2':\n try:\n dataset = load_from_local(task_name, ['train', 'validation'])\n except FileNotFoundError:\n dataset = load_dataset('glue', 'sst2', split=['train', 'validation'])\n for i, _ in enumerate(dataset):\n dataset[i] = dataset[i].rename_column('sentence', 'text')\n # rename validation to test\n elif task_name == 'agnews':\n try:\n dataset = load_from_local(task_name, ['train', 'test'])\n except FileNotFoundError:\n dataset = load_dataset('ag_news', split=['train', 'test'])\n elif task_name == 'trec':\n try:\n dataset = load_from_local(task_name, ['train', 'test'])\n except FileNotFoundError:\n dataset = load_dataset('trec', split=['train', 'test'])\n coarse_label_name = 'coarse_label' if 'coarse_label' in dataset[\n 0].column_names else 'label-coarse'\n for i, _ in enumerate(dataset):\n dataset[i] = dataset[i].rename_column(coarse_label_name, 'label')\n elif task_name == 'emo':\n try:\n dataset = load_from_local(task_name, ['train', 'test'])\n except FileNotFoundError:\n dataset = load_dataset('emo', split=['train', 'test'])\n if dataset is None:\n raise NotImplementedError(f\"task_name: {task_name}\")\n dataset = {'train': dataset[0], 'test': dataset[1]}\n return dataset"
},
{
"identifier": "set_seed",
"path": "icl/utils/random_utils.py",
"snippet": "def set_seed(seed):\n seed = int(seed)\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.enabled = True"
},
{
"identifier": "load_args",
"path": "icl/utils/other.py",
"snippet": "def load_args(args_type, is_ipynb=False):\n if not is_ipynb:\n parser = HfArgumentParser((args_type,))\n args, = parser.parse_args_into_dataclasses()\n else:\n args = args_type()\n return args"
},
{
"identifier": "set_gpu",
"path": "icl/utils/other.py",
"snippet": "def set_gpu(gpu_id: Union[str, int]):\n if isinstance(gpu_id, int):\n gpu_id = str(gpu_id)\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpu_id"
},
{
"identifier": "sample_two_set_with_shot_per_class",
"path": "icl/utils/other.py",
"snippet": "def sample_two_set_with_shot_per_class(ori_data, a_shot, b_shot, seed, label_name: str = 'labels',\n a_total_shot=None, b_total_shot=None):\n a_label_count = {}\n b_label_count = {}\n a_data_idx = []\n b_data_idx = []\n all_indices = [_ for _ in range(len(ori_data))]\n np_temp_random(seed=seed)(np.random.shuffle)(all_indices)\n\n a_total_cnt = 0\n b_total_cnt = 0\n for index in all_indices:\n label = ori_data[index][label_name]\n if label < 0:\n continue\n\n if label not in a_label_count.keys():\n a_label_count[label] = 0\n if label not in b_label_count.keys():\n b_label_count[label] = 0\n\n if a_label_count[label] < a_shot:\n a_data_idx.append(index)\n a_label_count[label] += 1\n a_total_cnt += 1\n elif b_label_count[label] < b_shot:\n b_data_idx.append(index)\n b_label_count[label] += 1\n b_total_cnt += 1\n\n a_cond = a_total_shot is not None and a_total_cnt >= a_total_shot\n b_cond = (b_total_shot is not None and b_total_cnt >= b_total_shot) or (b_shot == 0)\n if a_cond and b_cond:\n warnings.warn(f\"sampled {a_total_shot} and {b_total_shot} samples, \")\n\n a_data = ori_data.select(a_data_idx)\n b_data = ori_data.select(b_data_idx)\n return a_data, b_data"
},
{
"identifier": "get_model_layer_num",
"path": "icl/utils/load_local.py",
"snippet": "def get_model_layer_num(model = None, model_name = None):\n num_layer = None\n if model is not None:\n if hasattr(model.config, 'num_hidden_layers'):\n num_layer = model.config.num_hidden_layers\n elif hasattr(model.config, 'n_layers'):\n num_layer = model.config.n_layers\n elif hasattr(model.config, 'n_layer'):\n num_layer = model.config.n_layer\n else:\n pass\n elif model_name is not None:\n pass\n if num_layer is None:\n raise ValueError(f'cannot get num_layer from model: {model} or model_name: {model_name}')\n return num_layer"
},
{
"identifier": "CompressTopArgs",
"path": "icl/util_classes/arg_classes.py",
"snippet": "class CompressTopArgs(DeepArgs):\n ks_num: int = 20\n save_folder: str = os.path.join(FOLDER_ROOT, 'results', 'compress_top')"
},
{
"identifier": "load_model_and_tokenizer",
"path": "icl/utils/prepare_model_and_tokenizer.py",
"snippet": "def load_model_and_tokenizer(args: DeepArgs):\n if args.model_name in ['gpt2-xl', 'gpt-j-6b']:\n tokenizer = load_local_model_or_tokenizer(args.model_name, 'tokenizer')\n if tokenizer is None:\n tokenizer = AutoTokenizer.from_pretrained(args.model_name)\n model = load_local_model_or_tokenizer(args.model_name, 'model')\n if model is None:\n model = AutoModelForCausalLM.from_pretrained(args.model_name)\n tokenizer.pad_token = tokenizer.eos_token\n else:\n raise NotImplementedError(f\"model_name: {args.model_name}\")\n return model, tokenizer"
},
{
"identifier": "get_label_id_dict_for_args",
"path": "icl/utils/prepare_model_and_tokenizer.py",
"snippet": "def get_label_id_dict_for_args(args: DeepArgs, tokenizer):\n label_id_dict = {k: tokenizer.encode(v, add_special_tokens=False)[0] for k, v in\n args.label_dict.items()}\n for v in args.label_dict.values():\n token_num = len(tokenizer.encode(v, add_special_tokens=False))\n if token_num != 1:\n warnings.warn(f\"{v} in {args.task_name} has token_num: {token_num} which is not 1\")\n return label_id_dict"
}
] | import pickle
import random
import warnings
import os
import numpy as np
import torch
import torch.nn.functional as F
from dataclasses import dataclass, field
from typing import List
from transformers.hf_argparser import HfArgumentParser
from sklearn.metrics import accuracy_score
from .prefixier import Prefixer
from ..lm_apis.lm_api_base import LMForwardAPI
from ..util_classes.context_solver import ContextSolver
from ..util_classes.predictor_classes import Predictor
from ..utils.data_wrapper import wrap_dataset, tokenize_dataset, wrap_dataset_with_instruct, \
remove_str_columns
from ..utils.load_huggingface_dataset import load_huggingface_dataset_train_and_test
from ..utils.random_utils import set_seed
from ..utils.other import load_args, set_gpu, sample_two_set_with_shot_per_class
from transformers import Trainer, TrainingArguments, PreTrainedModel, AutoModelForCausalLM, \
AutoTokenizer, DataCollatorWithPadding
from ..utils.load_local import get_model_layer_num
from ..util_classes.arg_classes import CompressTopArgs
from ..utils.prepare_model_and_tokenizer import load_model_and_tokenizer, get_label_id_dict_for_args | 5,943 |
class TruncatingDataCollator(DataCollatorWithPadding):
def __init__(self, tokenizer, max_length: int, padding=True, pad_to_multiple_of=None):
super().__init__(tokenizer=tokenizer, padding=padding,
pad_to_multiple_of=pad_to_multiple_of)
self.max_length = max_length
def __call__(self, features: List[dict]):
batch = super().__call__(features)
for key, value in batch.items():
if isinstance(value, torch.Tensor) and len(value.shape) == 2:
batch[key] = value[:, :self.max_length]
return batch
def get_label(y):
return y.predictions[0].argmax(-1)
def get_logits(y):
if y.predictions[2].shape[-1] > 30000:
return y.predictions[2]
else:
return y.predictions[3]
def get_topk(y, k):
logits = get_logits(y)
indices = np.argpartition(logits, -k,axis=1)[:,-k:]
return indices
def jaccard(a,b):
scores = []
for single_a, single_b in zip(a,b):
set_a = set(single_a)
set_b = set(single_b)
score = len(set_a.intersection(set_b))/len(set_a.union(set_b))
scores.append(score)
return np.array(scores).mean()
def compress(args: CompressTopArgs):
if os.path.exists(args.save_file_name):
return
# set_gpu(args.gpu)
if args.sample_from == 'test':
dataset = load_huggingface_dataset_train_and_test(args.task_name)
else:
raise NotImplementedError(f"sample_from: {args.sample_from}")
model, tokenizer = load_model_and_tokenizer(args)
args.label_id_dict = get_label_id_dict_for_args(args, tokenizer)
model = LMForwardAPI(model=model, model_name=args.model_name, tokenizer=tokenizer,
device='cuda:0',
label_dict=args.label_dict)
|
class TruncatingDataCollator(DataCollatorWithPadding):
def __init__(self, tokenizer, max_length: int, padding=True, pad_to_multiple_of=None):
super().__init__(tokenizer=tokenizer, padding=padding,
pad_to_multiple_of=pad_to_multiple_of)
self.max_length = max_length
def __call__(self, features: List[dict]):
batch = super().__call__(features)
for key, value in batch.items():
if isinstance(value, torch.Tensor) and len(value.shape) == 2:
batch[key] = value[:, :self.max_length]
return batch
def get_label(y):
return y.predictions[0].argmax(-1)
def get_logits(y):
if y.predictions[2].shape[-1] > 30000:
return y.predictions[2]
else:
return y.predictions[3]
def get_topk(y, k):
logits = get_logits(y)
indices = np.argpartition(logits, -k,axis=1)[:,-k:]
return indices
def jaccard(a,b):
scores = []
for single_a, single_b in zip(a,b):
set_a = set(single_a)
set_b = set(single_b)
score = len(set_a.intersection(set_b))/len(set_a.union(set_b))
scores.append(score)
return np.array(scores).mean()
def compress(args: CompressTopArgs):
if os.path.exists(args.save_file_name):
return
# set_gpu(args.gpu)
if args.sample_from == 'test':
dataset = load_huggingface_dataset_train_and_test(args.task_name)
else:
raise NotImplementedError(f"sample_from: {args.sample_from}")
model, tokenizer = load_model_and_tokenizer(args)
args.label_id_dict = get_label_id_dict_for_args(args, tokenizer)
model = LMForwardAPI(model=model, model_name=args.model_name, tokenizer=tokenizer,
device='cuda:0',
label_dict=args.label_dict)
| set_seed(args.seeds[0]) | 8 | 2023-10-17 11:40:03+00:00 | 8k |
Aggify/aggify | aggify/aggify.py | [
{
"identifier": "F",
"path": "aggify/compiler.py",
"snippet": "class F:\n def __init__(self, field: Union[str, Dict[str, list]]):\n if isinstance(field, str):\n self.field = f\"${field.replace('__', '.')}\"\n else:\n self.field = field\n\n def to_dict(self):\n return self.field\n\n def __add__(self, other):\n if isinstance(other, F):\n other = other.field\n\n if isinstance(self.field, dict) and self.field.get(\"$add\") is not None:\n self.field[\"$add\"].append(other)\n combined_field = self.field\n else:\n combined_field = {\"$add\": [self.field, other]}\n\n return F(combined_field)\n\n def __sub__(self, other):\n if isinstance(other, F):\n other = other.field\n\n if isinstance(self.field, dict) and self.field.get(\"$subtract\") is not None:\n self.field[\"$subtract\"].append(other)\n combined_field = self.field\n else:\n combined_field = {\"$subtract\": [self.field, other]}\n return F(combined_field)\n\n def __mul__(self, other):\n if isinstance(other, F):\n other = other.field\n\n if isinstance(self.field, dict) and self.field.get(\"$multiply\") is not None:\n self.field[\"$multiply\"].append(other)\n combined_field = self.field\n else:\n combined_field = {\"$multiply\": [self.field, other]}\n return F(combined_field)\n\n def __truediv__(self, other):\n if isinstance(other, F):\n other = other.field\n\n if isinstance(self.field, dict) and self.field.get(\"$divide\") is not None:\n self.field[\"$divide\"].append(other)\n combined_field = self.field\n else:\n combined_field = {\"$divide\": [self.field, other]}\n return F(combined_field)\n\n @staticmethod\n def is_suitable_for_match(key: str) -> bool:\n if \"__\" not in key:\n return False\n return True\n\n def first(self):\n return {\"$first\": self.field}\n\n def last(self):\n return {\"$last\": self.field}\n\n def min(self):\n return {\"$min\": self.field}\n\n def max(self):\n return {\"$max\": self.field}\n\n def sum(self):\n return {\"$sum\": self.field}\n\n def avg(self):\n return {\"$avg\": self.field}"
},
{
"identifier": "Match",
"path": "aggify/compiler.py",
"snippet": "class Match:\n def __init__(\n self, matches: Dict[str, Any], base_model: Union[Type[Document], None]\n ):\n self.matches = matches\n self.base_model = base_model\n\n @staticmethod\n def validate_operator(key: str):\n _op = key.rsplit(\"__\", 1)\n try:\n operator = _op[1]\n except IndexError:\n raise InvalidOperator(str(_op)) from None\n\n if operator not in Operators.COMPARISON_OPERATORS:\n raise InvalidOperator(operator)\n\n def is_base_model_field(self, field) -> bool:\n \"\"\"\n Check if a field in the base model class is of a specific type.\n EmbeddedDocumentField: Field which is embedded.\n TopLevelDocumentMetaclass: Field which is added by lookup stage.\n\n Args:\n field (str): The name of the field to check.\n\n Returns:\n bool: True if the field is of type EmbeddedDocumentField or TopLevelDocumentMetaclass\n and the base_model is not None, otherwise False.\n \"\"\"\n return self.base_model is not None and (\n isinstance(\n self.base_model._fields.get(field), # noqa\n (EmbeddedDocumentField, TopLevelDocumentMetaclass),\n )\n )\n\n def compile(self, pipelines: list) -> Dict[str, Dict[str, list]]:\n match_query = {}\n for key, value in self.matches.items():\n if isinstance(value, F):\n if F.is_suitable_for_match(key) is False:\n raise InvalidOperator(key)\n\n if \"__\" not in key:\n key = get_db_field(self.base_model, key)\n match_query[key] = value\n continue\n\n field, operator, *others = key.split(\"__\")\n if (\n self.is_base_model_field(field)\n and operator not in Operators.ALL_OPERATORS\n ):\n field_db_name = get_db_field(self.base_model, field)\n\n nested_field_name = get_db_field(\n get_nested_field_model(self.base_model, field), operator\n )\n key = (\n f\"{field_db_name}.{nested_field_name}__\" + \"__\".join(others)\n ).rstrip(\"__\")\n pipelines.append(Match({key: value}, self.base_model).compile([]))\n continue\n\n if operator not in Operators.ALL_OPERATORS:\n raise InvalidOperator(operator)\n db_field = get_db_field(self.base_model, field)\n match_query = Operators(match_query).compile_match(\n operator, value, db_field\n )\n\n return {\"$match\": match_query}"
},
{
"identifier": "Q",
"path": "aggify/compiler.py",
"snippet": "class Q:\n def __init__(self, pipeline: Union[list, None] = None, **conditions):\n pipeline = pipeline or []\n self.conditions: dict[str, list] = (\n Match(\n matches=conditions,\n base_model=None,\n )\n .compile(pipeline)\n .get(\"$match\", {})\n )\n\n def __iter__(self):\n yield \"$match\", self.conditions\n\n def __or__(self, other):\n if self.conditions.get(\"$or\"):\n self.conditions[\"$or\"].append(dict(other)[\"$match\"])\n combined_conditions = self.conditions\n\n else:\n combined_conditions = {\"$or\": [self.conditions, dict(other)[\"$match\"]]}\n return Q(**combined_conditions)\n\n def __and__(self, other):\n if self.conditions.get(\"$and\"):\n self.conditions[\"$and\"].append(dict(other)[\"$match\"])\n combined_conditions = self.conditions\n else:\n combined_conditions = {\"$and\": [self.conditions, dict(other)[\"$match\"]]}\n return Q(**combined_conditions)\n\n def __invert__(self):\n combined_conditions = {\"$not\": [self.conditions]}\n return Q(**combined_conditions)"
},
{
"identifier": "Operators",
"path": "aggify/compiler.py",
"snippet": "class Operators:\n # noinspection SpellCheckingInspection\n QUERY_OPERATORS = {\n \"exact\": \"$eq\",\n \"iexact\": \"$regex\",\n \"contains\": \"$regex\",\n \"icontains\": \"$regex\",\n \"startswith\": \"$regex\",\n \"istartswith\": \"$regex\",\n \"endswith\": \"$regex\",\n \"iendswith\": \"$regex\",\n \"in\": \"$in\",\n \"nin\": \"$nin\",\n \"ne\": \"$ne\",\n \"not\": \"$not\",\n }\n\n COMPARISON_OPERATORS = {\n \"lt\": \"$lt\",\n \"lte\": \"$lte\",\n \"gt\": \"$gt\",\n \"gte\": \"$gte\",\n }\n\n ALL_OPERATORS = {\n **QUERY_OPERATORS,\n **COMPARISON_OPERATORS,\n }\n\n # noinspection SpellCheckingInspection\n REGEX_PATTERNS = {\n \"iexact\": \"^{value}$\",\n \"contains\": \"{value}\",\n \"icontains\": \"{value}\",\n \"startswith\": \"^{value}\",\n \"istartswith\": \"^{value}\",\n \"endswith\": \"{value}$\",\n \"iendswith\": \"{value}$\",\n }\n\n # noinspection SpellCheckingInspection\n REGEX_OPTIONS = {\n \"iexact\": \"i\",\n \"icontains\": \"i\",\n \"istartswith\": \"i\",\n \"iendswith\": \"i\",\n }\n\n def __init__(self, match_query: Dict[str, Any]):\n self.match_query = match_query\n\n def compile_match(self, operator: str, value, field: str):\n # TODO: i don't like this, we can refactor it later.\n # I think there should be easier way to inject comparison operators to be defined per each\n # like map an existing template to each operator\n\n if operator in Operators.REGEX_PATTERNS:\n if isinstance(value, F):\n raise ValueError(\"Not implemented yet\")\n pattern = Operators.REGEX_PATTERNS[operator].format(value=value)\n # Create the base query with the pattern\n query = {Operators.ALL_OPERATORS[operator]: pattern}\n\n # If there's an option for the operator, add it to the query\n if operator in Operators.REGEX_OPTIONS:\n query[\"$options\"] = Operators.REGEX_OPTIONS[operator]\n\n self.match_query[field] = query\n elif operator in Operators.ALL_OPERATORS:\n if isinstance(value, F):\n self.match_query[\"$expr\"] = {\n Operators.ALL_OPERATORS[operator]: [f\"${field}\", value.to_dict()]\n }\n else:\n self.match_query[field] = {Operators.ALL_OPERATORS[operator]: value}\n\n return self.match_query"
},
{
"identifier": "Cond",
"path": "aggify/compiler.py",
"snippet": "class Cond:\n \"\"\"\n input: Cond(23, '>', 20, 'hi', 'bye')\n return: {'$cond': {'if': {'$gt': [23, 20]}, 'then': 'hi', 'else': 'bye'}}\n \"\"\"\n\n OPERATOR_MAPPING = {\n \">\": \"$gt\",\n \">=\": \"$gte\",\n \"<\": \"$lt\",\n \"<=\": \"$lte\",\n \"==\": \"$eq\",\n \"!=\": \"$ne\",\n }\n\n def __init__(self, value1, condition, value2, then_value, else_value):\n self.value1 = value1\n self.value2 = value2\n self.condition = self._map_condition(condition)\n self.then_value = then_value\n self.else_value = else_value\n\n def _map_condition(self, condition):\n if condition in self.OPERATOR_MAPPING:\n return self.OPERATOR_MAPPING[condition]\n raise InvalidOperator(condition)\n\n def __iter__(self):\n \"\"\"Iterator used by `dict` to create a dictionary from a `Cond` object\n\n With this method we are now able to do this:\n c = Cond(...)\n dict_of_c = dict(c)\n\n instead of c.to_dict()\n\n Returns:\n A tuple of '$cond' and its value\n \"\"\"\n yield (\n \"$cond\",\n {\n \"if\": {self.condition: [self.value1, self.value2]},\n \"then\": self.then_value,\n \"else\": self.else_value,\n },\n )"
},
{
"identifier": "AggifyValueError",
"path": "aggify/exceptions.py",
"snippet": "class AggifyValueError(AggifyBaseException):\n def __init__(self, expected_list: List[Type], result: Type):\n self.message = (\n f\"Input is not correctly passed, expected either of {[expected for expected in expected_list]}\"\n f\"but got {result}\"\n )\n self.expecteds = expected_list\n self.result = result\n super().__init__(self.message)"
},
{
"identifier": "AnnotationError",
"path": "aggify/exceptions.py",
"snippet": "class AnnotationError(InvalidPipelineStageError):\n pass"
},
{
"identifier": "InvalidField",
"path": "aggify/exceptions.py",
"snippet": "class InvalidField(AggifyBaseException):\n def __init__(self, field: str):\n self.message = f\"Field {field} does not exists.\"\n super().__init__(self.message)"
},
{
"identifier": "InvalidEmbeddedField",
"path": "aggify/exceptions.py",
"snippet": "class InvalidEmbeddedField(AggifyBaseException):\n def __init__(self, field: str):\n self.message = f\"Field {field} is not embedded.\"\n super().__init__(self.message)"
},
{
"identifier": "OutStageError",
"path": "aggify/exceptions.py",
"snippet": "class OutStageError(InvalidPipelineStageError):\n def __init__(self, stage):\n self.message = (\n f\"You cannot add a {self!r} pipeline after $out stage! stage : {stage}\"\n )\n super().__init__(self.message)"
},
{
"identifier": "InvalidArgument",
"path": "aggify/exceptions.py",
"snippet": "class InvalidArgument(AggifyBaseException):\n def __init__(self, expected_list: list):\n self.message = f\"Input is not correctly passed, expected {[expected for expected in expected_list]}\"\n self.expecteds = expected_list\n super().__init__(self.message)"
},
{
"identifier": "InvalidProjection",
"path": "aggify/exceptions.py",
"snippet": "class InvalidProjection(AggifyBaseException):\n def __init__(self):\n self.message = \"You can't use inclusion and exclusion together.\"\n super().__init__(self.message)"
},
{
"identifier": "InvalidAnnotateExpression",
"path": "aggify/exceptions.py",
"snippet": "class InvalidAnnotateExpression(AggifyBaseException):\n def __init__(self):\n self.message = \"Invalid expression passed to annotate.\"\n super().__init__(self.message)"
},
{
"identifier": "QueryParams",
"path": "aggify/types.py",
"snippet": ""
},
{
"identifier": "to_mongo_positive_index",
"path": "aggify/utilty.py",
"snippet": "def to_mongo_positive_index(index: Union[int, slice]) -> slice:\n if isinstance(index, int):\n if index < 0:\n raise MongoIndexError\n return slice(0, index, None)\n\n if index.step is not None:\n raise MongoIndexError\n\n if int(index.start) > index.stop:\n raise MongoIndexError\n\n if int(index.start) < 0:\n raise MongoIndexError\n return index"
},
{
"identifier": "validate_field_existence",
"path": "aggify/utilty.py",
"snippet": "def validate_field_existence(model: CollectionType, fields_to_check: List[str]) -> None:\n \"\"\"\n The function checks a list of fields and raises an InvalidField exception if any are missing.\n\n Args:\n model: The model containing the fields to check.\n fields_to_check (list): A list of field names to check for existence in the model.\n\n Raises:\n InvalidField: If any of the specified fields are missing in the model's fields.\n \"\"\"\n missing_fields = [\n field for field in fields_to_check if not model._fields.get(field) # noqa\n ]\n if missing_fields:\n raise InvalidField(field=missing_fields[0])"
},
{
"identifier": "replace_values_recursive",
"path": "aggify/utilty.py",
"snippet": "def replace_values_recursive(obj, replacements):\n \"\"\"\n Replaces let values in a list of match stages.\n\n Args:\n obj: A list of match stages.\n replacements: Key, values to be replaced.\n\n Returns:\n A list of updated match stages.\n \"\"\"\n if isinstance(obj, list):\n updated_stages = []\n for item in obj:\n updated_stages.append(replace_values_recursive(item, replacements))\n return updated_stages\n elif isinstance(obj, dict):\n updated_stage = {}\n for key, value in obj.items():\n updated_stage[key] = replace_values_recursive(value, replacements)\n return updated_stage\n elif str(obj).replace(\"$\", \"\") in replacements:\n return replacements[obj.replace(\"$\", \"\")]\n else:\n return obj"
},
{
"identifier": "convert_match_query",
"path": "aggify/utilty.py",
"snippet": "def convert_match_query(\n d: Dict,\n) -> Union[Dict[Any, Union[List[Union[str, Any]], Dict]], List[Dict], Dict]:\n \"\"\"\n Recursively transform a dictionary to modify the structure of operators.\n\n Args:\n d (dict or any): The input dictionary to be transformed.\n\n Returns:\n dict or any: The transformed dictionary with operators modified.\n\n This function recursively processes the input dictionary, looking for operators\n within sub-dictionaries. When found, it restructures the data into the format {'$eq' or '$ne': [field, value]}.\n For other fields, it processes them recursively to maintain the dictionary structure.\n\n Example:\n original_dict = {'_id': {'$eq': 123456}, 'other_field': {'$ne': 789}, 'dynamic_field': {'$eq': 'dynamic_value'}}\n transformed_dict = transform_dict(original_dict)\n print(transformed_dict)\n # Output: {'$eq': ['_id', 123456], 'other_field': {'$ne': 789}, 'dynamic_field': {'$eq': 'dynamic_value'}}\n \"\"\"\n if isinstance(d, dict):\n new_dict = {}\n for key, value in d.items():\n operators = {\"$eq\", \"$ne\", \"$gt\", \"$lt\", \"$gte\", \"lte\"}\n if isinstance(value, dict) and any(op in value for op in operators):\n for operator, operand in value.items():\n new_dict[operator] = [f\"${key}\", operand]\n else:\n new_dict[key] = convert_match_query(value)\n return new_dict\n elif isinstance(d, list):\n return [convert_match_query(item) for item in d]\n else:\n return d"
},
{
"identifier": "check_field_already_exists",
"path": "aggify/utilty.py",
"snippet": "def check_field_already_exists(model: CollectionType, field: str) -> None:\n \"\"\"\n Check if a field exists in the given model.\n\n Args:\n model (Document): The model to check for the field.\n field (str): The name of the field to check.\n\n Raises:\n AlreadyExistsField: If the field already exists in the model.\n \"\"\"\n if field in [\n f.db_field if hasattr(f, \"db_field\") else k\n for k, f in model._fields.items() # noqa\n ]:\n raise AlreadyExistsField(field=field)"
},
{
"identifier": "get_db_field",
"path": "aggify/utilty.py",
"snippet": "def get_db_field(model: CollectionType, field: str, add_dollar_sign=False) -> str:\n \"\"\"\n Get the database field name for a given field in the model.\n\n Args:\n add_dollar_sign: Add a \"$\" at the start of the field or not\n model (Document): The model containing the field.\n field (str): The name of the field.\n\n Returns:\n str: The database field name if available, otherwise the original field name.\n \"\"\"\n try:\n db_field = model._fields.get(field).db_field # noqa\n db_field = field if db_field is None else db_field\n return f\"${db_field}\" if add_dollar_sign else db_field\n except AttributeError:\n return field"
},
{
"identifier": "copy_class",
"path": "aggify/utilty.py",
"snippet": "def copy_class(original_class):\n \"\"\"\n Copies a class, creating a new class with the same bases and attributes.\n\n Parameters:\n original_class (class): The class to be copied.\n\n Returns:\n class: A new class with the same bases and attributes as the original class.\n \"\"\"\n # Create a new class with the same name, bases, and attributes\n copied_class = type(\n \"Aggify\" + original_class.__name__,\n original_class.__bases__,\n dict(original_class.__dict__),\n )\n return copied_class"
}
] | import functools
from typing import Any, Dict, Type, Union, List, TypeVar, Callable, Tuple
from mongoengine import Document, EmbeddedDocument, fields as mongoengine_fields
from mongoengine.base import TopLevelDocumentMetaclass
from aggify.compiler import F, Match, Q, Operators, Cond # noqa keep
from aggify.exceptions import (
AggifyValueError,
AnnotationError,
InvalidField,
InvalidEmbeddedField,
OutStageError,
InvalidArgument,
InvalidProjection,
InvalidAnnotateExpression,
)
from aggify.types import QueryParams, CollectionType
from aggify.utilty import (
to_mongo_positive_index,
validate_field_existence,
replace_values_recursive,
convert_match_query,
check_field_already_exists,
get_db_field,
copy_class,
) | 5,804 |
class Aggify:
def __init__(self, base_model: Type[Document]):
"""
Initializes the Aggify class.
Args:
base_model: The base model class.
"""
# Create a separate copy of the main class for safety and flexibility
self.base_model = copy_class(base_model)
self.pipelines: List[Dict[str, Union[dict, Any]]] = []
self.start = None
self.stop = None
self.q = None
def __iter__(self):
# Return a generator or iterator for the data you want to represent as a list
return iter(self.pipelines)
@last_out_stage_check
def project(self, **kwargs: QueryParams) -> "Aggify":
"""
Adjusts the base model's fields based on the given keyword arguments.
Fields to be retained are set to 1 in kwargs.
Fields to be deleted are set to 0 in kwargs, except for _id which is controlled by the delete_id flag.
Args:
**kwargs: Fields to be retained or removed.
For example: {"field1": 1, "field2": 0}
_id field behavior: {"id": 0} means delete _id.
Returns:
Aggify: Returns an instance of the Aggify class for potential method chaining.
"""
filtered_kwargs = dict(kwargs)
filtered_kwargs.pop("id", None)
if all([i in filtered_kwargs.values() for i in [0, 1]]):
raise InvalidProjection()
# Extract fields to keep and check if _id should be deleted
to_keep_values = {"id"}
projection = {}
# Add missing fields to the base model
for key, value in kwargs.items():
if value == 1:
to_keep_values.add(key)
elif key not in self.base_model._fields and isinstance( # noqa
kwargs[key], (str, dict)
):
to_keep_values.add(key)
self.base_model._fields[key] = mongoengine_fields.IntField() # noqa
projection[get_db_field(self.base_model, key)] = value # noqa
if value == 0:
del self.base_model._fields[key] # noqa
# Remove fields from the base model, except the ones in to_keep_values and possibly _id
if to_keep_values != {"id"}:
keys_for_deletion = self.base_model._fields.keys() - to_keep_values # noqa
for key in keys_for_deletion:
del self.base_model._fields[key] # noqa
# Append the projection stage to the pipelines
self.pipelines.append({"$project": projection})
# Return the instance for method chaining
return self
@last_out_stage_check
def group(self, expression: Union[str, Dict, List, None] = "id") -> "Aggify":
if isinstance(expression, list):
expression = {
field: f"${self.get_field_name_recursively(field)}"
for field in expression
}
if expression and not isinstance(expression, dict):
try:
expression = "$" + self.get_field_name_recursively(expression)
except InvalidField:
pass
self.pipelines.append({"$group": {"_id": expression}})
return self
@last_out_stage_check
def order_by(self, *order_fields: Union[str, List[str]]) -> "Aggify":
sort_dict = {
get_db_field(self.base_model, field.replace("-", "")): -1
if field.startswith("-")
else 1
for field in order_fields
}
self.pipelines.append({"$sort": sort_dict})
return self
@last_out_stage_check
def raw(self, raw_query: dict) -> "Aggify":
self.pipelines.append(raw_query)
self.pipelines = self.__combine_sequential_matches()
return self
@last_out_stage_check
def add_fields(self, **fields) -> "Aggify": # noqa
"""Generates a MongoDB addFields pipeline stage.
Args:
fields: A dictionary of field expressions and values.
Returns:
A MongoDB add_fields pipeline stage.
"""
add_fields_stage = {"$addFields": {}}
for field, expression in fields.items():
field = field.replace("__", ".")
if isinstance(expression, str):
add_fields_stage["$addFields"][field] = {"$literal": expression}
elif isinstance(expression, F):
add_fields_stage["$addFields"][field] = expression.to_dict()
elif isinstance(expression, (list, dict)):
add_fields_stage["$addFields"][field] = expression
|
AggifyType = TypeVar("AggifyType", bound=Callable[..., "Aggify"])
def last_out_stage_check(method: AggifyType) -> AggifyType:
"""Check if the last stage is $out or not
This decorator check if the last stage is $out or not
MongoDB does not allow adding aggregation pipeline stage after $out stage
"""
@functools.wraps(method)
def decorator(*args, **kwargs):
try:
if bool(args[0].pipelines[-1].get("$out")):
raise OutStageError(method.__name__)
except IndexError:
return method(*args, **kwargs)
else:
return method(*args, **kwargs)
return decorator
class Aggify:
def __init__(self, base_model: Type[Document]):
"""
Initializes the Aggify class.
Args:
base_model: The base model class.
"""
# Create a separate copy of the main class for safety and flexibility
self.base_model = copy_class(base_model)
self.pipelines: List[Dict[str, Union[dict, Any]]] = []
self.start = None
self.stop = None
self.q = None
def __iter__(self):
# Return a generator or iterator for the data you want to represent as a list
return iter(self.pipelines)
@last_out_stage_check
def project(self, **kwargs: QueryParams) -> "Aggify":
"""
Adjusts the base model's fields based on the given keyword arguments.
Fields to be retained are set to 1 in kwargs.
Fields to be deleted are set to 0 in kwargs, except for _id which is controlled by the delete_id flag.
Args:
**kwargs: Fields to be retained or removed.
For example: {"field1": 1, "field2": 0}
_id field behavior: {"id": 0} means delete _id.
Returns:
Aggify: Returns an instance of the Aggify class for potential method chaining.
"""
filtered_kwargs = dict(kwargs)
filtered_kwargs.pop("id", None)
if all([i in filtered_kwargs.values() for i in [0, 1]]):
raise InvalidProjection()
# Extract fields to keep and check if _id should be deleted
to_keep_values = {"id"}
projection = {}
# Add missing fields to the base model
for key, value in kwargs.items():
if value == 1:
to_keep_values.add(key)
elif key not in self.base_model._fields and isinstance( # noqa
kwargs[key], (str, dict)
):
to_keep_values.add(key)
self.base_model._fields[key] = mongoengine_fields.IntField() # noqa
projection[get_db_field(self.base_model, key)] = value # noqa
if value == 0:
del self.base_model._fields[key] # noqa
# Remove fields from the base model, except the ones in to_keep_values and possibly _id
if to_keep_values != {"id"}:
keys_for_deletion = self.base_model._fields.keys() - to_keep_values # noqa
for key in keys_for_deletion:
del self.base_model._fields[key] # noqa
# Append the projection stage to the pipelines
self.pipelines.append({"$project": projection})
# Return the instance for method chaining
return self
@last_out_stage_check
def group(self, expression: Union[str, Dict, List, None] = "id") -> "Aggify":
if isinstance(expression, list):
expression = {
field: f"${self.get_field_name_recursively(field)}"
for field in expression
}
if expression and not isinstance(expression, dict):
try:
expression = "$" + self.get_field_name_recursively(expression)
except InvalidField:
pass
self.pipelines.append({"$group": {"_id": expression}})
return self
@last_out_stage_check
def order_by(self, *order_fields: Union[str, List[str]]) -> "Aggify":
sort_dict = {
get_db_field(self.base_model, field.replace("-", "")): -1
if field.startswith("-")
else 1
for field in order_fields
}
self.pipelines.append({"$sort": sort_dict})
return self
@last_out_stage_check
def raw(self, raw_query: dict) -> "Aggify":
self.pipelines.append(raw_query)
self.pipelines = self.__combine_sequential_matches()
return self
@last_out_stage_check
def add_fields(self, **fields) -> "Aggify": # noqa
"""Generates a MongoDB addFields pipeline stage.
Args:
fields: A dictionary of field expressions and values.
Returns:
A MongoDB add_fields pipeline stage.
"""
add_fields_stage = {"$addFields": {}}
for field, expression in fields.items():
field = field.replace("__", ".")
if isinstance(expression, str):
add_fields_stage["$addFields"][field] = {"$literal": expression}
elif isinstance(expression, F):
add_fields_stage["$addFields"][field] = expression.to_dict()
elif isinstance(expression, (list, dict)):
add_fields_stage["$addFields"][field] = expression | elif isinstance(expression, Cond): | 4 | 2023-10-22 07:53:28+00:00 | 8k |
sotopia-lab/sotopia | sotopia/samplers/uniform_sampler.py | [
{
"identifier": "BaseAgent",
"path": "sotopia/agents/base_agent.py",
"snippet": "class BaseAgent(Generic[ObsType, ActType], MessengerMixin):\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n agent_profile: AgentProfile | None = None,\n ) -> None:\n MessengerMixin.__init__(self)\n if agent_profile is not None:\n self.profile = agent_profile\n self.agent_name = (\n self.profile.first_name + \" \" + self.profile.last_name\n )\n elif uuid_str is not None:\n # try retrieving profile from database\n try:\n self.profile = AgentProfile.get(pk=uuid_str)\n except NotFoundError:\n raise ValueError(\n f\"Agent with uuid {uuid_str} not found in database\"\n )\n self.agent_name = (\n self.profile.first_name + \" \" + self.profile.last_name\n )\n else:\n assert (\n agent_name is not None\n ), \"Either agent_name or uuid_str must be provided\"\n self.agent_name = agent_name\n\n self._goal: str | None = None\n\n @property\n def goal(self) -> str:\n assert (\n self._goal is not None\n ), \"attribute goal has to be set before use\"\n return self._goal\n\n @goal.setter\n def goal(self, goal: str) -> None:\n self._goal = goal\n\n def act(self, obs: ObsType) -> ActType:\n raise NotImplementedError\n\n async def aact(self, obs: ObsType) -> ActType:\n raise NotImplementedError\n\n def reset(self) -> None:\n self.reset_inbox()"
},
{
"identifier": "AgentProfile",
"path": "sotopia/database/persistent_profile.py",
"snippet": "class AgentProfile(JsonModel):\n first_name: str = Field(index=True)\n last_name: str = Field(index=True)\n age: int = Field(index=True, default_factory=lambda: 0)\n occupation: str = Field(index=True, default_factory=lambda: \"\")\n gender: str = Field(index=True, default_factory=lambda: \"\")\n gender_pronoun: str = Field(index=True, default_factory=lambda: \"\")\n public_info: str = Field(index=True, default_factory=lambda: \"\")\n big_five: str = Field(index=True, default_factory=lambda: \"\")\n moral_values: list[str] = Field(index=False, default_factory=lambda: [])\n schwartz_personal_values: list[str] = Field(\n index=False, default_factory=lambda: []\n )\n personality_and_values: str = Field(index=True, default_factory=lambda: \"\")\n decision_making_style: str = Field(index=True, default_factory=lambda: \"\")\n secret: str = Field(default_factory=lambda: \"\")\n model_id: str = Field(default_factory=lambda: \"\")"
},
{
"identifier": "EnvironmentProfile",
"path": "sotopia/database/persistent_profile.py",
"snippet": "class EnvironmentProfile(JsonModel):\n codename: str = Field(\n index=True,\n default_factory=lambda: \"\",\n description=\"The codename of the environment\",\n )\n source: str = Field(\n index=True,\n default_factory=lambda: \"\",\n description=\"The source of the environment\",\n )\n scenario: str = Field(\n index=True,\n default_factory=lambda: \"\",\n description=\"A concrete scenario of where the social interaction takes place, the scenario should have two agents (agent1 and agent2), and you should illustrate the relationship between the two agents, and for what purpose agent1 is interacting with agent2. Please avoid mentioning specific names and occupations in the scenario and keep all the mentions gender-neutral. Also avoid generating scenarios that requires childrend (below 18) or elderly (above 70) to be involved.\",\n )\n agent_goals: list[str] = Field(\n default_factory=lambda: [],\n description=\"The social goals of each agent, which could include <extra_info>...</extra_info>, <clarification_hint>...</clarification_hint>, and <strategy_hint>...</strategy_hint> to help the agent achieve the goal. Avoid providing too specific strategy hint, try to be as abstract as possible. For example, use 'you can provide financial benefits to achieve your goal' instead of 'you can buy him a boba tea to achieve your goal.'\",\n )\n relationship: RelationshipType = Field(\n index=True,\n default_factory=lambda: RelationshipType.stranger,\n description=\"The relationship between the two agents, choose from: stranger, know_by_name, acquaintance, friend, romantic_relationship, family_member. Do not make up a relationship, but choose from the list, 0 means stranger, 1 means know_by_name, 2 means acquaintance, 3 means friend, 4 means romantic_relationship, 5 means family_member\",\n )\n age_constraint: str | None = Field(\n default_factory=lambda: None,\n description=\"The age constraint of the environment, a list of tuples, each tuple is a range of age, e.g., '[(18, 25), (30, 40)]' means the environment is only available to agent one between 18 and 25, and agent two between 30 and 40\",\n )\n occupation_constraint: str | None = Field(\n default_factory=lambda: None,\n description=\"The occupation constraint of the environment, a list of lists, each list is a list of occupations, e.g., '[['student', 'teacher'], ['doctor', 'nurse']]' means the environment is only available to agent one if agent one is a student or a teacher, and agent two is a doctor or a nurse\",\n )\n agent_constraint: list[list[str]] | None = Field(\n default_factory=lambda: None,\n )"
},
{
"identifier": "ParallelSotopiaEnv",
"path": "sotopia/envs/parallel.py",
"snippet": "class ParallelSotopiaEnv(\n ParallelEnv[str, Observation, AgentAction], MessengerMixin\n):\n def __init__(\n self,\n available_action_types: set[ActionType] = set(\n [\"none\", \"speak\", \"non-verbal communication\", \"action\", \"leave\"]\n ),\n action_order: Literal[\n \"simutaneous\", \"round-robin\", \"random\"\n ] = \"simutaneous\",\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n evaluators: list[Evaluator] = [],\n terminal_evaluators: list[Evaluator] = [],\n uuid_str: str | None = None,\n env_profile: EnvironmentProfile | None = None,\n ) -> None:\n \"\"\"A sotopia environment for parallel agents.\n\n Args:\n available_action_types (set[ActionType], optional): The action types that are available to the agents. Defaults to set([\"none\", \"speak\", \"non-verbal communication\", \"action\"]).\n action_order (Literal[\"simutaneous\", \"round-robin\", \"random\"], optional): The order in which the agents take actions. Defaults to \"simutaneous\".\n model_name (LLM_Name, optional): The name of the language model to use. Defaults to \"gpt-3.5-turbo\".\n \"\"\"\n super().__init__()\n self.model_name = model_name\n self.background = ScriptBackground(\n scenario=\"\",\n p1_background=\"\",\n p2_background=\"\",\n p1_goal=\"\",\n p2_goal=\"\",\n p1_name=\"\",\n p2_name=\"\",\n )\n\n self.agents = []\n self.action_spaces = {}\n self.available_action_types = list(available_action_types)\n self.action_order = action_order\n self.action_mask: list[bool] = []\n self.evaluators = evaluators\n self.terminal_evaluators = terminal_evaluators\n\n # if an environment profile is provided, use it\n assert (\n env_profile or uuid_str\n ), \"Either env_profile or uuid_str must be provided\"\n if env_profile is not None:\n self.profile = env_profile\n # if a uuid is provided, try to load the environment profile from the database\n elif uuid_str is not None:\n # try retrieving profile from database\n try:\n self.profile = EnvironmentProfile.get(pk=uuid_str)\n except NotFoundError:\n raise ValueError(\n f\"Agent with uuid {uuid_str} not found in database\"\n )\n\n @configurable\n def reset(\n self,\n seed: int | None = None,\n options: dict[str, str] | None = None,\n agents: Agents | None = None,\n omniscient: bool = False,\n lite: bool = False,\n ) -> dict[str, Observation]:\n \"\"\"Starting a new episode. Must be called before step().\n\n Args:\n seed (int, optional): Seed for the environment. Defaults to None. Not used right now.\n options (dict, optional): Options for the environment. Defaults to None.\n \"partial_background_file\" (str): Path to a json file which need to contain a ScriptBackground object. The backgound can be incompleted (\"unknown\" for missing parts), and the missing parts will be filled in by the environment.\n \"full_background_file\" (str): Path to a json file which need to contain a ScriptBackground object. The backgound must be completed (no \"unknown\" for missing parts).\n omniscient (bool, optional): Whether the agents know the other agent's goal. Defaults to False.\n \"\"\"\n super().__init__()\n MessengerMixin.reset_inbox(self)\n assert (\n not options\n or not (\"partial_background_file\" in options)\n and not (\"full_background_file\" in options)\n ), \"partial_background_file and full_background_file are not supported anymore\"\n if agents is not None:\n assert agents, \"agents must be provided\"\n assert len(agents) == 2, \"Only supporting two agents right now\"\n agent_names = list(agents.keys())\n agent_goals = self.profile.agent_goals\n assert (\n len(agent_goals) == 2\n ), \"Only supporting two agents right now\"\n\n raw_background = ScriptBackground(\n scenario=self.profile.scenario,\n p1_background=get_bio(\n self.profile.relationship,\n agents[agent_names[0]].profile,\n agent_id=0,\n ),\n p2_background=get_bio(\n self.profile.relationship,\n agents[agent_names[1]].profile,\n agent_id=1,\n ),\n p1_goal=f\"<root viewer='agent_0'>{agent_goals[0]}</root>\",\n p2_goal=f\"<root viewer='agent_1'>{agent_goals[1]}</root>\",\n p1_name=agent_names[0],\n p2_name=agent_names[1],\n )\n\n if lite:\n raw_background.p1_background = \"\"\n raw_background.p2_background = \"\"\n\n self.background = ScriptBackground(\n scenario=render_text_for_environment(raw_background.scenario),\n p1_background=render_text_for_environment(\n raw_background.p1_background\n ),\n p2_background=render_text_for_environment(\n raw_background.p2_background\n ),\n p1_goal=render_text_for_environment(raw_background.p1_goal),\n p2_goal=render_text_for_environment(raw_background.p2_goal),\n p1_name=raw_background.p1_name,\n p2_name=raw_background.p2_name,\n )\n else:\n raise ValueError(\"agents must be provided\")\n\n self.agents = [self.background.p1_name, self.background.p2_name]\n agent_backgrounds: list[ScriptBackground] = []\n if omniscient:\n for i in range(self.num_agents):\n agent_backgrounds.append(copy.deepcopy(self.background))\n else:\n for i in range(self.num_agents):\n agent_backgrounds.append(\n ScriptBackground(\n scenario=render_text_for_agent(\n raw_background.scenario, i\n ),\n p1_background=render_text_for_agent(\n raw_background.p1_background, i\n ),\n p2_background=render_text_for_agent(\n raw_background.p2_background, i\n ),\n p1_goal=render_text_for_agent(\n raw_background.p1_goal, i\n ),\n p2_goal=render_text_for_agent(\n raw_background.p2_goal, i\n ),\n p1_name=raw_background.p1_name,\n p2_name=raw_background.p2_name,\n )\n )\n background_for_a = agent_backgrounds[0]\n background_for_b = agent_backgrounds[1]\n\n print(\"Is the agent omniscient?\", omniscient)\n if not omniscient:\n background_for_a.p2_goal = \"Unknown\"\n background_for_b.p1_goal = \"Unknown\"\n\n self.action_spaces = {\n agent: Dict(\n dict(\n action_type=Discrete(len(self.available_action_types)),\n argument=Text(256),\n )\n )\n for agent in self.agents\n }\n self.turn_number = 0\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[0] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n\n self.recv_message(\"Environment\", self.background)\n\n return {\n self.background.p1_name: Observation(\n last_turn=background_for_a.to_natural_language(),\n turn_number=0,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=background_for_b.to_natural_language(),\n turn_number=0,\n available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n }\n\n @beartype\n def step(\n self, actions: dict[str, AgentAction] | dict[str, dict[str, int | str]]\n ) -> tuple[\n dict[str, Observation],\n dict[str, float],\n dict[str, bool],\n dict[str, bool],\n dict[str, dict[Any, Any]],\n ]:\n # Time step ++\n self.turn_number += 1\n\n # For action sampled from action space, it needs to be converted into AgentAction\n complied_actions: dict[str, AgentAction] = {}\n for key in actions.keys():\n action = actions[key]\n if isinstance(action, AgentAction):\n complied_actions[key] = action\n else:\n action[\"action_type\"] = self.available_action_types[\n int(action[\"action_type\"])\n ]\n complied_actions[key] = AgentAction.parse_obj(action)\n\n # Masking actions from agent that are in turn\n for idx, agent in enumerate(self.agents):\n if not self.action_mask[idx]:\n complied_actions[agent] = AgentAction(\n action_type=\"none\", argument=\"\"\n )\n\n self.recv_message(\n \"Environment\", SimpleMessage(message=f\"Turn #{self.turn_number}\")\n )\n for agent, action in complied_actions.items():\n self.recv_message(agent, action)\n\n response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *(\n evaluator(\n turn_number=self.turn_number, messages=self.inbox\n )\n for evaluator in self.evaluators\n )\n )\n )\n )\n\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[self.turn_number % len(self.action_mask)] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n obs = _actions_to_natural_language(complied_actions)\n return (\n {\n self.background.p1_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=0),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=1),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n },\n {\n self.background.p1_name: (\n response.p1_rate\n if isinstance(response.p1_rate, float)\n else response.p1_rate[0]\n )\n if response.p1_rate\n else 0,\n self.background.p2_name: (\n response.p2_rate\n if isinstance(response.p2_rate, float)\n else response.p2_rate[0]\n )\n if response.p2_rate\n else 0,\n },\n {\n self.background.p1_name: response.terminated,\n self.background.p2_name: response.terminated,\n },\n {\n self.background.p1_name: False,\n self.background.p2_name: False,\n },\n {\n self.background.p1_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p1_rate or 0,\n },\n self.background.p2_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p2_rate or 0,\n },\n },\n )\n\n @beartype\n async def astep(\n self, actions: dict[str, AgentAction] | dict[str, dict[str, int | str]]\n ) -> tuple[\n dict[str, Observation],\n dict[str, float],\n dict[str, bool],\n dict[str, bool],\n dict[str, dict[Any, Any]],\n ]:\n # Time step ++\n self.turn_number += 1\n\n # For action sampled from action space, it needs to be converted into AgentAction\n complied_actions: dict[str, AgentAction] = {}\n for key in actions.keys():\n action = actions[key]\n if isinstance(action, AgentAction):\n complied_actions[key] = action\n else:\n action[\"action_type\"] = self.available_action_types[\n int(action[\"action_type\"])\n ]\n complied_actions[key] = AgentAction.parse_obj(action)\n\n # Masking actions from agent that are in turn\n for idx, agent in enumerate(self.agents):\n if not self.action_mask[idx]:\n complied_actions[agent] = AgentAction(\n action_type=\"none\", argument=\"\"\n )\n\n self.recv_message(\n \"Environment\", SimpleMessage(message=f\"Turn #{self.turn_number}\")\n )\n for agent, action in complied_actions.items():\n self.recv_message(agent, action)\n\n response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *await asyncio.gather(\n *[\n evaluator.__acall__(\n turn_number=self.turn_number,\n messages=self.inbox,\n )\n for evaluator in self.evaluators\n ]\n )\n )\n )\n )\n\n if response.terminated:\n terminal_response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *await asyncio.gather(\n *[\n evaluator.__acall__(\n turn_number=self.turn_number,\n messages=self.inbox,\n )\n for evaluator in self.terminal_evaluators\n ]\n )\n )\n )\n )\n # incorporate terminal response into response\n response.p1_rate = response.p1_rate or terminal_response.p1_rate\n response.p2_rate = response.p2_rate or terminal_response.p2_rate\n if response.comments and terminal_response.comments:\n response.comments += terminal_response.comments\n elif terminal_response.comments:\n response.comments = terminal_response.comments\n\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[self.turn_number % len(self.action_mask)] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n obs = _actions_to_natural_language(complied_actions)\n info = {\n self.background.p1_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p1_rate or 0,\n },\n self.background.p2_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p2_rate or 0,\n },\n }\n if response.terminated:\n info[\"rewards_prompt\"] = {\"overall_prompt\": self.terminal_evaluators[0].prompt} # type: ignore\n\n return (\n {\n self.background.p1_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=0),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=1),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n },\n {\n self.background.p1_name: (\n response.p1_rate\n if isinstance(response.p1_rate, float)\n else response.p1_rate[0]\n )\n if response.p1_rate\n else 0,\n self.background.p2_name: (\n response.p2_rate\n if isinstance(response.p2_rate, float)\n else response.p2_rate[0]\n )\n if response.p2_rate\n else 0,\n },\n {\n self.background.p1_name: response.terminated,\n self.background.p2_name: response.terminated,\n },\n {\n self.background.p1_name: False,\n self.background.p2_name: False,\n },\n info,\n )\n\n def render(self, mode: str = \"human\") -> None:\n pass\n\n def close(self) -> None:\n pass"
},
{
"identifier": "BaseSampler",
"path": "sotopia/samplers/base_sampler.py",
"snippet": "class BaseSampler(Generic[ObsType, ActType]):\n def __init__(\n self,\n env_candidates: Sequence[EnvironmentProfile | str] | None = None,\n agent_candidates: Sequence[AgentProfile | str] | None = None,\n ) -> None:\n def sample(\n self,\n agent_classes: Type[BaseAgent[ObsType, ActType]]\n | list[Type[BaseAgent[ObsType, ActType]]],\n n_agent: int = 2,\n replacement: bool = True,\n size: int = 1,\n env_params: dict[str, Any] = {},\n agents_params: list[dict[str, Any]] = [{}, {}],\n ) -> Generator[EnvAgentCombo[ObsType, ActType], None, None]:"
}
] | import random
from typing import Any, Generator, Type, TypeVar, cast
from sotopia.agents.base_agent import BaseAgent
from sotopia.database import AgentProfile, EnvironmentProfile
from sotopia.envs.parallel import ParallelSotopiaEnv
from .base_sampler import BaseSampler, EnvAgentCombo | 5,981 |
ObsType = TypeVar("ObsType")
ActType = TypeVar("ActType")
class UniformSampler(BaseSampler[ObsType, ActType]):
def sample(
self,
agent_classes: Type[BaseAgent[ObsType, ActType]]
| list[Type[BaseAgent[ObsType, ActType]]],
n_agent: int = 2,
replacement: bool = True,
size: int = 1,
env_params: dict[str, Any] = {},
agents_params: list[dict[str, Any]] = [{}, {}],
) -> Generator[EnvAgentCombo[ObsType, ActType], None, None]:
"""
Sample an environment and `n_agent` agents.
Runtime checks:
1. If `agent_classes` is a list, it should have length `n_agent`.
2. `agents_params` should also be a list of length `n_agent`.
Note: Currently, uniform sampling without replacement is not supported.
This is due to the difficulty of sequentially sampling environment and agents.
In theory, we can reject samples that have been sampled before, but this is not efficient.
Please open an issue if you need this feature.
"""
assert (
not isinstance(agent_classes, list)
or len(agent_classes) == n_agent
), f"agent_classes should be a list of length {n_agent} or a single agent class"
if not isinstance(agent_classes, list):
agent_classes = [agent_classes] * n_agent
assert (
len(agents_params) == n_agent
), f"agents_params should be a list of length {n_agent}"
assert (
replacement
), "Uniform sampling without replacement is not supported yet"
for _ in range(size):
if self.env_candidates:
env_profile = random.choice(self.env_candidates)
if isinstance(env_profile, str):
env_profile = EnvironmentProfile.get(env_profile)
else:
env_profile_id = random.choice(
list(EnvironmentProfile.all_pks())
)
env_profile = EnvironmentProfile.get(env_profile_id)
|
ObsType = TypeVar("ObsType")
ActType = TypeVar("ActType")
class UniformSampler(BaseSampler[ObsType, ActType]):
def sample(
self,
agent_classes: Type[BaseAgent[ObsType, ActType]]
| list[Type[BaseAgent[ObsType, ActType]]],
n_agent: int = 2,
replacement: bool = True,
size: int = 1,
env_params: dict[str, Any] = {},
agents_params: list[dict[str, Any]] = [{}, {}],
) -> Generator[EnvAgentCombo[ObsType, ActType], None, None]:
"""
Sample an environment and `n_agent` agents.
Runtime checks:
1. If `agent_classes` is a list, it should have length `n_agent`.
2. `agents_params` should also be a list of length `n_agent`.
Note: Currently, uniform sampling without replacement is not supported.
This is due to the difficulty of sequentially sampling environment and agents.
In theory, we can reject samples that have been sampled before, but this is not efficient.
Please open an issue if you need this feature.
"""
assert (
not isinstance(agent_classes, list)
or len(agent_classes) == n_agent
), f"agent_classes should be a list of length {n_agent} or a single agent class"
if not isinstance(agent_classes, list):
agent_classes = [agent_classes] * n_agent
assert (
len(agents_params) == n_agent
), f"agents_params should be a list of length {n_agent}"
assert (
replacement
), "Uniform sampling without replacement is not supported yet"
for _ in range(size):
if self.env_candidates:
env_profile = random.choice(self.env_candidates)
if isinstance(env_profile, str):
env_profile = EnvironmentProfile.get(env_profile)
else:
env_profile_id = random.choice(
list(EnvironmentProfile.all_pks())
)
env_profile = EnvironmentProfile.get(env_profile_id) | env = ParallelSotopiaEnv(env_profile=env_profile, **env_params) | 3 | 2023-10-23 19:47:26+00:00 | 8k |
Zai-Kun/reverse-engineered-chatgpt | re_gpt/sync_chatgpt.py | [
{
"identifier": "BACKUP_ARKOSE_TOKEN_GENERATOR",
"path": "re_gpt/async_chatgpt.py",
"snippet": "BACKUP_ARKOSE_TOKEN_GENERATOR = \"https://arkose-token-generator.zaieem.repl.co/token\""
},
{
"identifier": "CHATGPT_API",
"path": "re_gpt/async_chatgpt.py",
"snippet": "CHATGPT_API = \"https://chat.openai.com/backend-api/{}\""
},
{
"identifier": "USER_AGENT",
"path": "re_gpt/async_chatgpt.py",
"snippet": "USER_AGENT = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36\""
},
{
"identifier": "AsyncChatGPT",
"path": "re_gpt/async_chatgpt.py",
"snippet": "class AsyncChatGPT:\n def __init__(\n self,\n proxies: Optional[dict] = None,\n session_token: Optional[str] = None,\n exit_callback_function: Optional[Callable] = None,\n auth_token: Optional[str] = None,\n generate_arkose_token: Optional[bool] = False,\n ):\n \"\"\"\n Initializes an instance of the class.\n\n Args:\n proxies (Optional[dict]): A dictionary of proxy settings. Defaults to None.\n session_token (Optional[str]): A session token. Defaults to None.\n exit_callback_function (Optional[callable]): A function to be called on exit. Defaults to None.\n auth_token (Optional[str]): An authentication token. Defaults to None.\n generate_arkose_token (Optional[bool]): Toggle whether to generate and send arkose-token in the payload. Defaults to False.\n \"\"\"\n self.proxies = proxies\n self.exit_callback_function = exit_callback_function\n\n self.arkose = None\n self.binary_path = None\n self.tried_downloading_binary = False\n self.generate_arkose_token = generate_arkose_token\n\n self.session_token = session_token\n self.auth_token = auth_token\n self.session = None\n\n async def __aenter__(self):\n self.session = AsyncSession(\n impersonate=\"chrome110\", timeout=99999, proxies=self.proxies\n )\n if self.generate_arkose_token:\n self.binary_path = await async_get_binary_path(self.session)\n\n if self.binary_path:\n self.arkose = ctypes.CDLL(self.binary_path)\n self.arkose.GetToken.restype = ctypes.c_char_p\n\n self.tried_downloading_binary = True\n\n if not self.auth_token:\n if self.session_token is None:\n raise TokenNotProvided\n self.auth_token = await self.fetch_auth_token()\n\n return self\n\n async def __aexit__(self, *_):\n try:\n if self.exit_callback_function and callable(self.exit_callback_function):\n if not inspect.iscoroutinefunction(self.exit_callback_function):\n self.exit_callback_function(self)\n finally:\n self.session.close()\n\n def build_request_headers(self) -> dict:\n \"\"\"\n Build headers for HTTP requests.\n\n Returns:\n dict: Request headers.\n \"\"\"\n headers = {\n \"User-Agent\": USER_AGENT,\n \"Accept\": \"text/event-stream\",\n \"Accept-Language\": \"en-US\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Content-Type\": \"application/json\",\n \"Authorization\": f\"Bearer {self.auth_token}\",\n \"Origin\": \"https://chat.openai.com\",\n \"Alt-Used\": \"chat.openai.com\",\n \"Connection\": \"keep-alive\",\n }\n\n return headers\n\n def get_conversation(self, conversation_id: str) -> AsyncConversation:\n \"\"\"\n Makes an instance of class Conversation and return it.\n\n Args:\n conversation_id (str): The ID of the conversation to fetch.\n\n Returns:\n Conversation: Conversation object.\n \"\"\"\n\n return AsyncConversation(self, conversation_id)\n\n def create_new_conversation(\n self, model: Optional[str] = \"gpt-3.5\"\n ) -> AsyncConversation:\n if model not in MODELS:\n raise InvalidModelName(model, MODELS)\n return AsyncConversation(self, model=model)\n\n async def delete_conversation(self, conversation_id: str) -> dict:\n \"\"\"\n Delete a conversation.\n\n Args:\n conversation_id (str): Unique identifier for the conversation.\n\n Returns:\n dict: Server response json.\n \"\"\"\n url = CHATGPT_API.format(f\"conversation/{conversation_id}\")\n response = await self.session.patch(\n url=url, headers=self.build_request_headers(), json={\"is_visible\": False}\n )\n\n return response.json()\n\n async def fetch_auth_token(self) -> str:\n \"\"\"\n Fetch the authentication token for the session.\n\n Raises:\n InvalidSessionToken: If the session token is invalid.\n\n Returns: authentication token.\n \"\"\"\n url = \"https://chat.openai.com/api/auth/session\"\n cookies = {\"__Secure-next-auth.session-token\": self.session_token}\n\n headers = {\n \"User-Agent\": USER_AGENT,\n \"Accept\": \"*/*\",\n \"Accept-Language\": \"en-US,en;q=0.5\",\n \"Alt-Used\": \"chat.openai.com\",\n \"Connection\": \"keep-alive\",\n \"Sec-Fetch-Dest\": \"empty\",\n \"Sec-Fetch-Mode\": \"cors\",\n \"Sec-Fetch-Site\": \"same-origin\",\n \"Sec-GPC\": \"1\",\n \"Cookie\": \"; \".join(\n [\n f\"{cookie_key}={cookie_value}\"\n for cookie_key, cookie_value in cookies.items()\n ]\n ),\n }\n\n response = await self.session.get(url=url, headers=headers)\n response_json = response.json()\n\n if \"accessToken\" in response_json:\n return response_json[\"accessToken\"]\n\n raise InvalidSessionToken\n\n async def set_custom_instructions(\n self,\n about_user: Optional[str] = \"\",\n about_model: Optional[str] = \"\",\n enable_for_new_chats: Optional[bool] = True,\n ) -> dict:\n \"\"\"\n Set cuteom instructions for ChatGPT.\n\n Args:\n about_user (str): What would you like ChatGPT to know about you to provide better responses?\n about_model (str): How would you like ChatGPT to respond?\n enable_for_new_chats (bool): Enable for new chats.\n Returns:\n dict: Server response json.\n \"\"\"\n data = {\n \"about_user_message\": about_user,\n \"about_model_message\": about_model,\n \"enabled\": enable_for_new_chats,\n }\n url = CHATGPT_API.format(\"user_system_messages\")\n response = await self.session.post(\n url=url, headers=self.build_request_headers(), json=data\n )\n\n return response.json()\n\n async def retrieve_chats(\n self, offset: Optional[int] = 0, limit: Optional[int] = 28\n ) -> dict:\n params = {\n \"offset\": offset,\n \"limit\": limit,\n \"order\": \"updated\",\n }\n url = CHATGPT_API.format(\"conversations\")\n response = await self.session.get(\n url=url, params=params, headers=self.build_request_headers()\n )\n\n return response.json()"
},
{
"identifier": "AsyncConversation",
"path": "re_gpt/async_chatgpt.py",
"snippet": "class AsyncConversation:\n def __init__(self, chatgpt, conversation_id=None, model=None):\n self.chatgpt = chatgpt\n self.conversation_id = conversation_id\n self.parent_id = None\n self.model = model\n\n async def fetch_chat(self) -> dict:\n \"\"\"\n Fetches the chat of the conversation from the API.\n\n Returns:\n dict: The JSON response from the API containing the chat if the conversation_id is not none, else returns an empty dict.\n\n Raises:\n UnexpectedResponseError: If the response is not a valid JSON object or if the response json is not in the expected format\n \"\"\"\n if not self.conversation_id:\n return {}\n\n url = CHATGPT_API.format(f\"conversation/{self.conversation_id}\")\n response = await self.chatgpt.session.get(\n url=url, headers=self.chatgpt.build_request_headers()\n )\n\n error = None\n try:\n chat = response.json()\n self.parent_id = list(chat.get(\"mapping\", {}))[-1]\n model_slug = get_model_slug(chat)\n self.model = [\n key for key, value in MODELS.items() if value[\"slug\"] == model_slug\n ][0]\n except Exception as e:\n error = e\n if error is not None:\n raise UnexpectedResponseError(error, response.text)\n\n return chat\n\n async def chat(self, user_input: str) -> AsyncGenerator[dict, None]:\n \"\"\"\n As the name implies, chat with ChatGPT.\n\n Args:\n user_input (str): The user's input message.\n\n Yields:\n dict: A dictionary representing assistant responses.\n\n Returns:\n AsyncGenerator[dict, None]: An asynchronous generator object that yields assistant responses.\n\n Raises:\n UnexpectedResponseError: If the response is not a valid JSON object or if the response json is not in the expected format\n \"\"\"\n\n payload = await self.build_message_payload(user_input)\n\n server_response = (\n \"\" # To store what the server returned for debugging in case of an error\n )\n error = None\n try:\n full_message = None\n while True:\n response = self.send_message(payload=payload)\n async for chunk in response:\n decoded_chunk = chunk.decode()\n\n server_response += decoded_chunk\n for line in decoded_chunk.splitlines():\n if not line.startswith(\"data: \"):\n continue\n\n raw_json_data = line[6:]\n if not (decoded_json := self.decode_raw_json(raw_json_data)):\n continue\n\n if (\n \"message\" in decoded_json\n and decoded_json[\"message\"][\"author\"][\"role\"] == \"assistant\"\n ):\n processed_response = self.filter_response(decoded_json)\n if full_message:\n prev_resp_len = len(\n full_message[\"message\"][\"content\"][\"parts\"][0]\n )\n processed_response[\"content\"] = processed_response[\n \"content\"\n ][prev_resp_len::]\n\n yield processed_response\n full_message = decoded_json\n self.conversation_id = full_message[\"conversation_id\"]\n self.parent_id = full_message[\"message\"][\"id\"]\n if (\n full_message[\"message\"][\"metadata\"][\"finish_details\"][\"type\"]\n == \"max_tokens\"\n ):\n payload = await self.build_message_continuation_payload()\n else:\n break\n except Exception as e:\n error = e\n\n # raising the error outside the 'except' block to prevent the 'During handling of the above exception, another exception occurred' error\n if error is not None:\n raise UnexpectedResponseError(error, server_response)\n\n async def send_message(self, payload: dict) -> AsyncGenerator[bytes, None]:\n \"\"\"\n Send a message payload to the server and receive the response.\n\n Args:\n payload (dict): Payload containing message information.\n\n Yields:\n bytes: Chunk of data received as a response.\n \"\"\"\n response_queue = asyncio.Queue()\n\n async def perform_request():\n def content_callback(chunk):\n response_queue.put_nowait(chunk)\n\n url = CHATGPT_API.format(\"conversation\")\n await self.chatgpt.session.post(\n url=url,\n headers=self.chatgpt.build_request_headers(),\n json=payload,\n content_callback=content_callback,\n )\n await response_queue.put(None)\n\n asyncio.create_task(perform_request())\n\n while True:\n chunk = await response_queue.get()\n if chunk is None:\n break\n yield chunk\n\n async def build_message_payload(self, user_input: str) -> dict:\n \"\"\"\n Build a payload for sending a user message.\n\n Returns:\n dict: Payload containing message information.\n \"\"\"\n if self.conversation_id and (self.parent_id is None or self.model is None):\n await self.fetch_chat() # it will automatically fetch the chat and set the parent id\n\n payload = {\n \"conversation_mode\": {\"conversation_mode\": {\"kind\": \"primary_assistant\"}},\n \"conversation_id\": self.conversation_id,\n \"action\": \"next\",\n \"arkose_token\": await self.arkose_token_generator()\n if self.chatgpt.generate_arkose_token\n or MODELS[self.model][\"needs_arkose_token\"]\n else None,\n \"force_paragen\": False,\n \"history_and_training_disabled\": False,\n \"messages\": [\n {\n \"author\": {\"role\": \"user\"},\n \"content\": {\"content_type\": \"text\", \"parts\": [user_input]},\n \"id\": str(uuid.uuid4()),\n \"metadata\": {},\n }\n ],\n \"model\": MODELS[self.model][\"slug\"],\n \"parent_message_id\": str(uuid.uuid4())\n if not self.parent_id\n else self.parent_id,\n }\n\n return payload\n\n async def build_message_continuation_payload(self) -> dict:\n \"\"\"\n Build a payload for continuing ChatGPT's cut off response.\n\n Returns:\n dict: Payload containing message information for continuation.\n \"\"\"\n payload = {\n \"conversation_mode\": {\"conversation_mode\": {\"kind\": \"primary_assistant\"}},\n \"action\": \"continue\",\n \"arkose_token\": await self.arkose_token_generator()\n if self.chatgpt.generate_arkose_token\n or MODELS[self.model][\"needs_arkose_token\"]\n else None,\n \"conversation_id\": self.conversation_id,\n \"force_paragen\": False,\n \"history_and_training_disabled\": False,\n \"model\": MODELS[self.model][\"slug\"],\n \"parent_message_id\": self.parent_id,\n \"timezone_offset_min\": -300,\n }\n\n return payload\n\n async def arkose_token_generator(self) -> str:\n \"\"\"\n Generate an Arkose token.\n\n Returns:\n str: Arkose token.\n \"\"\"\n if not self.chatgpt.tried_downloading_binary:\n self.chatgpt.binary_path = await async_get_binary_path(self.chatgpt.session)\n\n if self.chatgpt.binary_path:\n self.chatgpt.arkose = ctypes.CDLL(self.chatgpt.binary_path)\n self.chatgpt.arkose.GetToken.restype = ctypes.c_char_p\n\n self.chatgpt.tried_downloading_binary = True\n\n if self.chatgpt.binary_path:\n try:\n result = self.chatgpt.arkose.GetToken()\n return ctypes.string_at(result).decode(\"utf-8\")\n except:\n pass\n\n for _ in range(5):\n response = await self.chatgpt.session.get(BACKUP_ARKOSE_TOKEN_GENERATOR)\n if response.text == \"null\":\n raise BackendError(error_code=505)\n try:\n return response.json()[\"token\"]\n except:\n await asyncio.sleep(0.7)\n\n raise RetryError(website=BACKUP_ARKOSE_TOKEN_GENERATOR)\n\n async def delete(self) -> None:\n \"\"\"\n Deletes the conversation.\n \"\"\"\n if self.conversation_id:\n await self.chatgpt.delete_conversation(self.conversation_id)\n\n self.conversation_id = None\n self.parent_id = None\n\n @staticmethod\n def decode_raw_json(raw_json_data: str) -> dict or bool:\n \"\"\"\n Decode JSON.\n\n Args:\n raw_json_data (str): JSON as a string.\n\n Returns:\n dict: Decoded JSON.\n \"\"\"\n try:\n decoded_json = json.loads(raw_json_data.strip())\n return decoded_json\n except:\n return False\n\n @staticmethod\n def filter_response(response):\n processed_response = {\n \"content\": response[\"message\"][\"content\"][\"parts\"][0],\n \"message_id\": response[\"message\"][\"id\"],\n \"parent_id\": response[\"message\"][\"metadata\"][\"parent_id\"],\n \"conversation_id\": response[\"conversation_id\"],\n }\n\n return processed_response"
},
{
"identifier": "MODELS",
"path": "re_gpt/async_chatgpt.py",
"snippet": "MODELS = {\n \"gpt-4\": {\"slug\": \"gpt-4\", \"needs_arkose_token\": True},\n \"gpt-3.5\": {\"slug\": \"text-davinci-002-render-sha\", \"needs_arkose_token\": False},\n}"
},
{
"identifier": "BackendError",
"path": "re_gpt/errors.py",
"snippet": "class BackendError(Exception):\n def __init__(self, error_code):\n self.error_code = error_code\n self.message = (\n f\"An error occurred on the backend. Error code: {self.error_code}\"\n )\n super().__init__(self.message)"
},
{
"identifier": "InvalidSessionToken",
"path": "re_gpt/errors.py",
"snippet": "class InvalidSessionToken(Exception):\n def __init__(self):\n self.message = \"Invalid session token provided.\"\n super().__init__(self.message)"
},
{
"identifier": "RetryError",
"path": "re_gpt/errors.py",
"snippet": "class RetryError(Exception):\n def __init__(self, website, message=\"Exceeded maximum retries\"):\n self.website = website\n self.message = f\"{message} for website: {website}\"\n super().__init__(self.message)"
},
{
"identifier": "TokenNotProvided",
"path": "re_gpt/errors.py",
"snippet": "class TokenNotProvided(Exception):\n def __init__(self):\n self.message = \"Token not provided. Please pass your '__Secure-next-auth.session-token' as an argument (e.g., ChatGPT.init(session_token=YOUR_TOKEN)).\"\n super().__init__(self.message)"
},
{
"identifier": "UnexpectedResponseError",
"path": "re_gpt/errors.py",
"snippet": "class UnexpectedResponseError(Exception):\n def __init__(self, original_exception, server_response):\n self.original_exception = original_exception\n self.server_response = server_response\n self.message = f\"An unexpected error occurred. Error message: {self.original_exception}.\\nThis is what the server returned: {self.server_response}.\"\n super().__init__(self.message)"
},
{
"identifier": "InvalidModelName",
"path": "re_gpt/errors.py",
"snippet": "class InvalidModelName(Exception):\n def __init__(self, model, avalible_models):\n self.model = model\n self.avalible_models = avalible_models\n self.message = f'\"{model}\" is not a valid model. Avalible models: {[model for model in avalible_models]}'\n super().__init__(self.message)"
},
{
"identifier": "sync_get_binary_path",
"path": "re_gpt/utils.py",
"snippet": "def sync_get_binary_path(session):\n if binary_path is None:\n return None\n\n if not os.path.exists(funcaptcha_bin_folder_path) or not os.path.isdir(\n funcaptcha_bin_folder_path\n ):\n os.mkdir(funcaptcha_bin_folder_path)\n\n if os.path.isfile(binary_path):\n try:\n local_binary_hash = calculate_file_md5(binary_path)\n response = session.get(latest_release_url)\n json_data = response.json()\n\n for line in json_data[\"body\"].splitlines():\n if line.startswith(current_os):\n latest_binary_hash = line.split(\"=\")[-1]\n break\n\n if local_binary_hash != latest_binary_hash:\n file_url = get_file_url(json_data)\n\n sync_download_binary(session, binary_path, file_url)\n except:\n return binary_path\n else:\n response = session.get(latest_release_url)\n json_data = response.json()\n file_url = get_file_url(json_data)\n\n sync_download_binary(session, binary_path, file_url)\n\n return binary_path"
},
{
"identifier": "get_model_slug",
"path": "re_gpt/utils.py",
"snippet": "def get_model_slug(chat):\n for _, message in chat.get(\"mapping\", {}).items():\n if \"message\" in message:\n role = message[\"message\"][\"author\"][\"role\"]\n if role == \"assistant\":\n return message[\"message\"][\"metadata\"][\"model_slug\"]"
}
] | import ctypes
import inspect
import time
import uuid
from queue import Queue
from threading import Thread
from typing import Callable, Generator, Optional
from curl_cffi.requests import Session
from .async_chatgpt import (
BACKUP_ARKOSE_TOKEN_GENERATOR,
CHATGPT_API,
USER_AGENT,
AsyncChatGPT,
AsyncConversation,
MODELS,
)
from .errors import (
BackendError,
InvalidSessionToken,
RetryError,
TokenNotProvided,
UnexpectedResponseError,
InvalidModelName,
)
from .utils import sync_get_binary_path, get_model_slug | 6,893 | auth_token: Optional[str] = None,
):
"""
Initializes an instance of the class.
Args:
proxies (Optional[dict]): A dictionary of proxy settings. Defaults to None.
session_token (Optional[str]): A session token. Defaults to None.
exit_callback_function (Optional[callable]): A function to be called on exit. Defaults to None.
auth_token (Optional[str]): An authentication token. Defaults to None.
"""
super().__init__(
proxies=proxies,
session_token=session_token,
exit_callback_function=exit_callback_function,
auth_token=auth_token,
)
def __enter__(self):
self.session = Session(
impersonate="chrome110", timeout=99999, proxies=self.proxies
)
if self.generate_arkose_token:
self.binary_path = sync_get_binary_path(self.session)
if self.binary_path:
self.arkose = ctypes.CDLL(self.binary_path)
self.arkose.GetToken.restype = ctypes.c_char_p
self.tried_downloading_binary = True
if not self.auth_token:
if self.session_token is None:
raise TokenNotProvided
self.auth_token = self.fetch_auth_token()
return self
def __exit__(self, *args):
try:
if self.exit_callback_function and callable(self.exit_callback_function):
if not inspect.iscoroutinefunction(self.exit_callback_function):
self.exit_callback_function(self)
finally:
self.session.close()
def get_conversation(self, conversation_id: str) -> SyncConversation:
"""
Makes an instance of class Conversation and return it.
Args:
conversation_id (str): The ID of the conversation to fetch.
Returns:
Conversation: Conversation object.
"""
return SyncConversation(self, conversation_id)
def create_new_conversation(
self, model: Optional[str] = "gpt-3.5"
) -> SyncConversation:
if model not in MODELS:
raise InvalidModelName(model, MODELS)
return SyncConversation(self, model=model)
def delete_conversation(self, conversation_id: str) -> dict:
"""
Delete a conversation.
Args:
conversation_id (str): Unique identifier for the conversation.
Returns:
dict: Server response json.
"""
url = CHATGPT_API.format(f"conversation/{conversation_id}")
response = self.session.patch(
url=url, headers=self.build_request_headers(), json={"is_visible": False}
)
return response.json()
def fetch_auth_token(self) -> str:
"""
Fetch the authentication token for the session.
Raises:
InvalidSessionToken: If the session token is invalid.
Returns: authentication token.
"""
url = "https://chat.openai.com/api/auth/session"
cookies = {"__Secure-next-auth.session-token": self.session_token}
headers = {
"User-Agent": USER_AGENT,
"Accept": "*/*",
"Accept-Language": "en-US,en;q=0.5",
"Alt-Used": "chat.openai.com",
"Connection": "keep-alive",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"Sec-GPC": "1",
"Cookie": "; ".join(
[
f"{cookie_key}={cookie_value}"
for cookie_key, cookie_value in cookies.items()
]
),
}
response = self.session.get(url=url, headers=headers)
response_json = response.json()
if "accessToken" in response_json:
return response_json["accessToken"]
|
class SyncConversation(AsyncConversation):
def __init__(self, chatgpt, conversation_id: Optional[str] = None, model=None):
super().__init__(chatgpt, conversation_id, model)
def fetch_chat(self) -> dict:
"""
Fetches the chat of the conversation from the API.
Returns:
dict: The JSON response from the API containing the chat if the conversation_id is not none, else returns an empty dict.
Raises:
UnexpectedResponseError: If the response is not a valid JSON object or if the response json is not in the expected format
"""
if not self.conversation_id:
return {}
url = CHATGPT_API.format(f"conversation/{self.conversation_id}")
response = self.chatgpt.session.get(
url=url, headers=self.chatgpt.build_request_headers()
)
error = None
try:
chat = response.json()
self.parent_id = list(chat.get("mapping", {}))[-1]
model_slug = get_model_slug(chat)
self.model = [
key for key, value in MODELS.items() if value["slug"] == model_slug
][0]
except Exception as e:
error = e
if error is not None:
raise UnexpectedResponseError(error, response.text)
return chat
def chat(self, user_input: str) -> Generator[dict, None, None]:
"""
As the name implies, chat with ChatGPT.
Args:
user_input (str): The user's input message.
Yields:
dict: A dictionary representing assistant responses.
Returns:
Generator[dict, None]: A generator object that yields assistant responses.
Raises:
UnexpectedResponseError: If the response is not a valid JSON object or if the response json is not in the expected format
"""
payload = self.build_message_payload(user_input)
server_response = (
"" # To store what the server returned for debugging in case of an error
)
error = None
try:
full_message = None
while True:
response = self.send_message(payload=payload)
for chunk in response:
decoded_chunk = chunk.decode()
server_response += decoded_chunk
for line in decoded_chunk.splitlines():
if not line.startswith("data: "):
continue
raw_json_data = line[6:]
if not (decoded_json := self.decode_raw_json(raw_json_data)):
continue
if (
"message" in decoded_json
and decoded_json["message"]["author"]["role"] == "assistant"
):
processed_response = self.filter_response(decoded_json)
if full_message:
prev_resp_len = len(
full_message["message"]["content"]["parts"][0]
)
processed_response["content"] = processed_response[
"content"
][prev_resp_len::]
yield processed_response
full_message = decoded_json
self.conversation_id = full_message["conversation_id"]
self.parent_id = full_message["message"]["id"]
if (
full_message["message"]["metadata"]["finish_details"]["type"]
== "max_tokens"
):
payload = self.build_message_continuation_payload()
else:
break
except Exception as e:
error = e
# raising the error outside the 'except' block to prevent the 'During handling of the above exception, another exception occurred' error
if error is not None:
raise UnexpectedResponseError(error, server_response)
def send_message(self, payload: dict) -> Generator[bytes, None, None]:
"""
Send a message payload to the server and receive the response.
Args:
payload (dict): Payload containing message information.
Yields:
bytes: Chunk of data received as a response.
"""
response_queue = Queue()
def perform_request():
def content_callback(chunk):
response_queue.put(chunk)
url = CHATGPT_API.format("conversation")
response = self.chatgpt.session.post(
url=url,
headers=self.chatgpt.build_request_headers(),
json=payload,
content_callback=content_callback,
)
response_queue.put(None)
Thread(target=perform_request).start()
while True:
chunk = response_queue.get()
if chunk is None:
break
yield chunk
def build_message_payload(self, user_input: str) -> dict:
"""
Build a payload for sending a user message.
Returns:
dict: Payload containing message information.
"""
if self.conversation_id and (self.parent_id is None or self.model is None):
self.fetch_chat() # it will automatically fetch the chat and set the parent id
payload = {
"conversation_mode": {"conversation_mode": {"kind": "primary_assistant"}},
"conversation_id": self.conversation_id,
"action": "next",
"arkose_token": self.arkose_token_generator()
if self.chatgpt.generate_arkose_token
or MODELS[self.model]["needs_arkose_token"]
else None,
"force_paragen": False,
"history_and_training_disabled": False,
"messages": [
{
"author": {"role": "user"},
"content": {"content_type": "text", "parts": [user_input]},
"id": str(uuid.uuid4()),
"metadata": {},
}
],
"model": MODELS[self.model]["slug"],
"parent_message_id": str(uuid.uuid4())
if not self.parent_id
else self.parent_id,
}
return payload
def build_message_continuation_payload(self) -> dict:
"""
Build a payload for continuing ChatGPT's cut off response.
Returns:
dict: Payload containing message information for continuation.
"""
payload = {
"conversation_mode": {"conversation_mode": {"kind": "primary_assistant"}},
"action": "continue",
"arkose_token": self.arkose_token_generator()
if self.chatgpt.generate_arkose_token
or MODELS[self.model]["needs_arkose_token"]
else None,
"conversation_id": self.conversation_id,
"force_paragen": False,
"history_and_training_disabled": False,
"model": MODELS[self.model]["slug"],
"parent_message_id": self.parent_id,
"timezone_offset_min": -300,
}
return payload
def arkose_token_generator(self) -> str:
"""
Generate an Arkose token.
Returns:
str: Arkose token.
"""
if not self.chatgpt.tried_downloading_binary:
self.chatgpt.binary_path = sync_get_binary_path(self.chatgpt.session)
if self.chatgpt.binary_path:
self.chatgpt.arkose = ctypes.CDLL(self.chatgpt.binary_path)
self.chatgpt.arkose.GetToken.restype = ctypes.c_char_p
self.chatgpt.tried_downloading_binary = True
if self.chatgpt.binary_path:
try:
result = self.chatgpt.arkose.GetToken()
return ctypes.string_at(result).decode("utf-8")
except:
pass
for _ in range(5):
response = self.chatgpt.session.get(BACKUP_ARKOSE_TOKEN_GENERATOR)
if response.text == "null":
raise BackendError(error_code=505)
try:
return response.json()["token"]
except:
time.sleep(0.7)
raise RetryError(website=BACKUP_ARKOSE_TOKEN_GENERATOR)
def delete(self) -> None:
"""
Deletes the conversation.
"""
if self.conversation_id:
self.chatgpt.delete_conversation(self.conversation_id)
self.conversation_id = None
self.parent_id = None
class SyncChatGPT(AsyncChatGPT):
def __init__(
self,
proxies: Optional[dict] = None,
session_token: Optional[str] = None,
exit_callback_function: Optional[Callable] = None,
auth_token: Optional[str] = None,
):
"""
Initializes an instance of the class.
Args:
proxies (Optional[dict]): A dictionary of proxy settings. Defaults to None.
session_token (Optional[str]): A session token. Defaults to None.
exit_callback_function (Optional[callable]): A function to be called on exit. Defaults to None.
auth_token (Optional[str]): An authentication token. Defaults to None.
"""
super().__init__(
proxies=proxies,
session_token=session_token,
exit_callback_function=exit_callback_function,
auth_token=auth_token,
)
def __enter__(self):
self.session = Session(
impersonate="chrome110", timeout=99999, proxies=self.proxies
)
if self.generate_arkose_token:
self.binary_path = sync_get_binary_path(self.session)
if self.binary_path:
self.arkose = ctypes.CDLL(self.binary_path)
self.arkose.GetToken.restype = ctypes.c_char_p
self.tried_downloading_binary = True
if not self.auth_token:
if self.session_token is None:
raise TokenNotProvided
self.auth_token = self.fetch_auth_token()
return self
def __exit__(self, *args):
try:
if self.exit_callback_function and callable(self.exit_callback_function):
if not inspect.iscoroutinefunction(self.exit_callback_function):
self.exit_callback_function(self)
finally:
self.session.close()
def get_conversation(self, conversation_id: str) -> SyncConversation:
"""
Makes an instance of class Conversation and return it.
Args:
conversation_id (str): The ID of the conversation to fetch.
Returns:
Conversation: Conversation object.
"""
return SyncConversation(self, conversation_id)
def create_new_conversation(
self, model: Optional[str] = "gpt-3.5"
) -> SyncConversation:
if model not in MODELS:
raise InvalidModelName(model, MODELS)
return SyncConversation(self, model=model)
def delete_conversation(self, conversation_id: str) -> dict:
"""
Delete a conversation.
Args:
conversation_id (str): Unique identifier for the conversation.
Returns:
dict: Server response json.
"""
url = CHATGPT_API.format(f"conversation/{conversation_id}")
response = self.session.patch(
url=url, headers=self.build_request_headers(), json={"is_visible": False}
)
return response.json()
def fetch_auth_token(self) -> str:
"""
Fetch the authentication token for the session.
Raises:
InvalidSessionToken: If the session token is invalid.
Returns: authentication token.
"""
url = "https://chat.openai.com/api/auth/session"
cookies = {"__Secure-next-auth.session-token": self.session_token}
headers = {
"User-Agent": USER_AGENT,
"Accept": "*/*",
"Accept-Language": "en-US,en;q=0.5",
"Alt-Used": "chat.openai.com",
"Connection": "keep-alive",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"Sec-GPC": "1",
"Cookie": "; ".join(
[
f"{cookie_key}={cookie_value}"
for cookie_key, cookie_value in cookies.items()
]
),
}
response = self.session.get(url=url, headers=headers)
response_json = response.json()
if "accessToken" in response_json:
return response_json["accessToken"]
| raise InvalidSessionToken | 7 | 2023-10-17 08:34:04+00:00 | 8k |
qualabs/video-headline | player/views.py | [
{
"identifier": "Media",
"path": "video/models/media.py",
"snippet": "class Media(models.Model):\n '''\n Constants to represent the `state`s of the Video\n '''\n\n class State:\n WAITING_FILE = 'waiting_file'\n QUEUING_FAILED = 'queuing_failed'\n QUEUED = 'queued'\n PROCESSING = 'processing'\n PROCESSING_FAILED = 'processing_failed'\n FINISHED = 'finished'\n NOT_FINISHED = 'not_finished'\n FAILED = 'failed'\n\n CHOICES = (\n (WAITING_FILE, WAITING_FILE),\n (QUEUING_FAILED, QUEUING_FAILED),\n (QUEUED, QUEUED),\n (PROCESSING, PROCESSING),\n (PROCESSING_FAILED, PROCESSING_FAILED),\n (FINISHED, FINISHED)\n )\n\n AUTOPLAY_CHOICES = (('c', 'Channel'), ('y', 'Yes'), ('n', 'No'))\n\n MEDIA_TYPE_CHOICES = (('audio', 'Audio'), ('video', 'Video'))\n\n video_id = models.CharField(max_length=36,\n default=uuid.uuid4,\n unique=True,\n db_index=True,\n verbose_name='Content ID')\n\n name = models.CharField(max_length=254,\n verbose_name='Name')\n\n created_by = models.ForeignKey(Account,\n models.SET_NULL,\n related_name='uploaded_videos',\n verbose_name='Created by',\n null=True)\n\n organization = models.ForeignKey(Organization,\n models.CASCADE,\n related_name='media',\n verbose_name='Organization')\n\n channel = models.ForeignKey(Channel,\n models.CASCADE,\n null=True,\n blank=True,\n related_name='media',\n verbose_name='Channel')\n\n tags = models.ManyToManyField(Tag,\n related_name='media',\n verbose_name='Tags',\n blank=True)\n\n state = FSMField(default=State.WAITING_FILE,\n verbose_name='Video State',\n choices=State.CHOICES,\n protected=True)\n\n metadata = JSONField(\n max_length=500, blank=True, default={},\n verbose_name='Metadata'\n )\n\n ads_vast_url = models.URLField(\n blank=True,\n null=True,\n max_length=1024,\n verbose_name='VAST URL (ads)'\n )\n\n enable_ads = models.BooleanField(\n default=True,\n verbose_name='Enable Ads?'\n )\n\n autoplay = models.CharField(\n max_length=1,\n default='c',\n choices=AUTOPLAY_CHOICES,\n verbose_name='Autoplay?'\n )\n\n created_at = models.DateTimeField(\n editable=False,\n default=timezone.now,\n verbose_name='Created'\n )\n\n media_type = models.CharField(\n max_length=5,\n default='video',\n choices=MEDIA_TYPE_CHOICES,\n verbose_name='Content Type'\n )\n \n has_thumbnail = models.BooleanField(\n default=False,\n verbose_name='Has custom thumbnail?'\n )\n\n storage = models.BigIntegerField(default=0,\n verbose_name='Size in bytes')\n\n duration = models.IntegerField(default=0,\n verbose_name='Duration in seconds')\n\n def __str__(self):\n return f'{self.video_id} ({self.name})'\n\n class Meta:\n verbose_name = 'Content'\n verbose_name_plural = 'Contents'\n\n def get_urls(self):\n channel = self.channel\n\n # Hacky patch. Don't know how you'd get into this state!\n if channel is None:\n return \"\", \"\", \"\"\n \n media_url = ''\n\n # Default mime type for video\n mime_type = 'application/x-mpegURL'\n poster_url = ''\n\n if self.media_type == 'video':\n media_url = f'https://{channel.cf_domain}/{self.video_id}/hls/output.m3u8'\n poster_url = f'https://{channel.cf_domain}/{self.video_id}/thumbs/thumb_high.0000000.jpg'\n\n elif self.media_type == 'audio':\n media_url = f'https://{channel.cf_domain}/{self.video_id}/audio/output.mp4'\n mime_type = 'audio/mp4'\n\n thumb_path = 'thumb.jpg' if self.has_thumbnail else 'thumbs/thumb_high.0000000.jpg'\n poster_url = f'https://{channel.cf_domain}/{self.video_id}/{thumb_path}'\n\n return poster_url, media_url, mime_type\n\n @transition(field=state, source=State.WAITING_FILE, target=State.QUEUED)\n def _to_queued(self):\n pass\n\n @transition(field=state, source=State.WAITING_FILE, target=State.QUEUING_FAILED)\n def _to_queued_failed(self):\n pass\n\n @transition(field=state, source=State.QUEUED, target=State.PROCESSING)\n def _to_processing(self):\n pass\n\n @transition(field=state, source=State.PROCESSING, target=State.PROCESSING_FAILED)\n def _to_processing_failed(self):\n pass\n\n @transition(field=state, source=[State.PROCESSING, State.QUEUED], target=State.FINISHED)\n def _to_finished(self):\n pass\n\n @transition(field=state,\n source=[State.FINISHED, State.PROCESSING_FAILED, State.FAILED,\n State.QUEUING_FAILED],\n target=State.QUEUED)\n def _re_process(self):\n pass\n\n def to_queued(self):\n self._to_queued()\n # send video to transcode\n mediaconvert.transcode(self)\n self.save()\n\n def to_queued_failed(self):\n self._to_queued_failed()\n self.save()\n\n def to_processing(self):\n self._to_processing()\n self.save()\n\n def to_processing_failed(self):\n self._to_processing_failed()\n self.save()\n\n def to_finished(self):\n self._to_finished()\n self.storage = s3.get_size(self.organization, self.organization.bucket_name, self.video_id)\n self.save()\n\n def re_process(self):\n self._re_process()\n self.metadata = {}\n\n # Delete files on S3\n s3.delete_object(self.organization.bucket_name, '{}/thumb'.format(self.video_id),\n self.organization.aws_account)\n s3.delete_object(self.organization.bucket_name, '{}/hls'.format(self.video_id),\n self.organization.aws_account)\n\n # Invalidate cache on CloudFront\n cloudfront.create_invalidation(self.organization, self.channel.cf_id, [\n '/{}/thumb/*'.format(self.video_id),\n '/{}/hls/*'.format(self.video_id)\n ])\n\n mediaconvert.transcode(self)\n self.save()"
},
{
"identifier": "LiveVideo",
"path": "video/models/live.py",
"snippet": "class LiveVideo(models.Model):\n '''\n Constants to represent the state`s of the Streaming\n '''\n\n class State:\n OFF = 'off'\n ON = 'on'\n STARTING = 'starting'\n STOPPING = 'stopping'\n WAITING_INPUT = 'waiting_input'\n DELETING = 'deleting'\n\n CHOICES = (\n (OFF, OFF),\n (ON, ON),\n (STARTING, STARTING),\n (STOPPING, STOPPING)\n )\n\n class GeoType:\n WHITELIST = 'whitelist'\n BLACKLIST = 'blacklist'\n NONE = 'none'\n\n CHOICES = (\n (WHITELIST, WHITELIST),\n (BLACKLIST, BLACKLIST),\n (NONE, NONE)\n )\n\n AUTOPLAY_CHOICES = (('c', 'Channel'), ('y', 'Yes'), ('n', 'No'))\n\n video_id = models.CharField(max_length=36,\n default=uuid.uuid4,\n unique=True,\n db_index=True,\n verbose_name='Video ID')\n\n name = models.CharField(max_length=254,\n verbose_name='Name')\n\n created_by = models.ForeignKey(Account,\n models.SET_NULL,\n related_name='uploaded_live_video',\n verbose_name='Created by',\n null=True)\n\n organization = models.ForeignKey(Organization,\n models.CASCADE,\n related_name='live_videos',\n verbose_name='Organization')\n\n channel = models.ForeignKey(Channel,\n models.CASCADE,\n null=True,\n blank=True,\n related_name='live_videos',\n verbose_name='Channel')\n\n tags = models.ManyToManyField(Tag,\n related_name='live_videos',\n verbose_name='Tags',\n blank=True)\n\n state = FSMField(default=State.OFF,\n verbose_name='Live Video state',\n choices=State.CHOICES,\n protected=True)\n\n input_state = ArrayField(models.CharField(max_length=255,\n default='',\n verbose_name='Origin state'),\n default=list,\n blank=True)\n\n metadata = JSONField(max_length=500,\n blank=True,\n default=dict,\n verbose_name='Metadata')\n\n ads_vast_url = models.URLField(blank=True,\n null=True,\n max_length=1024,\n verbose_name='VAST URL (ads)')\n\n enable_ads = models.BooleanField(default=True,\n verbose_name='Enable Ads?')\n\n created_at = models.DateTimeField(editable=False,\n default=timezone.now,\n verbose_name='Created')\n\n ml_input_url = models.CharField(max_length=254,\n editable=False,\n default='',\n verbose_name='Input Url')\n\n ml_input_id = models.CharField(max_length=36,\n editable=False,\n default='',\n verbose_name='Input Id')\n\n ml_channel_arn = models.CharField(max_length=254,\n editable=False,\n default='',\n verbose_name='Channel Arn')\n\n sns_topic_arn = models.CharField(max_length=254,\n editable=False,\n default='',\n verbose_name='Topic Arn')\n\n autoplay = models.CharField(max_length=1,\n default='c',\n choices=AUTOPLAY_CHOICES,\n verbose_name='Autoplay?'\n )\n\n cf_id = models.CharField(max_length=100,\n verbose_name='Cf_id',\n editable=False,\n default='')\n\n cf_domain = models.CharField(max_length=100,\n verbose_name='Cf_domain',\n editable=False,\n default='')\n\n geolocation_type = models.CharField(max_length=20,\n editable=True,\n choices=GeoType.CHOICES,\n default=GeoType.NONE,\n verbose_name='Geolocation Type'\n )\n\n geolocation_countries = ArrayField(models.CharField(max_length=2,\n editable=True,\n default='',\n verbose_name='Geolocation Countries'),\n default=list,\n blank=True\n )\n\n def __init__(self, *args, **kwargs):\n super(LiveVideo, self).__init__(*args, **kwargs)\n self._old_geolocation_type = self.geolocation_type\n self._old_geolocation_countries = self.geolocation_countries\n self._old_channel = self.channel\n\n def __str__(self):\n return f'{self.video_id} ({self.name})'\n\n def ml_channel_id(self):\n return self.ml_channel_arn.split(':')[-1]\n\n class Meta:\n verbose_name = 'Live Video'\n verbose_name_plural = 'Live Videos'\n\n @transition(field=state, source=[State.STOPPING], target=State.OFF)\n def _to_off(self):\n pass\n\n @transition(field=state, source=[State.STARTING], target=State.WAITING_INPUT)\n def _to_waiting(self):\n pass\n\n @transition(field=state, source=[State.WAITING_INPUT], target=State.ON)\n def _to_on(self):\n pass\n\n @transition(field=state, source=[State.STOPPING, State.OFF], target=State.STARTING)\n def _to_starting(self):\n pass\n\n @transition(field=state, source=[State.STARTING, State.ON, State.WAITING_INPUT], target=State.STOPPING)\n def _to_stopping(self):\n pass\n\n @transition(field=state, source=[State.OFF], target=State.DELETING)\n def _to_deleting(self):\n pass\n \n def to_starting(self):\n self._to_starting()\n medialive.start_channel(self)\n self.save()\n\n def to_stopping(self):\n self._to_stopping()\n medialive.stop_channel(self)\n self.save()\n\n def to_waiting(self):\n self._to_waiting()\n cloudwatchlogs.check_input_state(self)\n self.save()\n\n def to_on(self):\n self._to_on()\n self.save()\n\n def to_off(self):\n self._to_off()\n self.input_state.clear()\n self.save()\n \n def to_deleting(self):\n self._to_deleting()\n self.save()\n channel_id = self.ml_channel_arn.split(':')[-1] \n account_id = self.organization.aws_account.account_id\n video_id = self.video_id\n try:\n medialive.delete_channel(channel_id, account_id)\n except medialive.ChannelNotFoundException:\n pass\n finally:\n medialive.delete_input(self.ml_input_id,account_id)\n cloudfront.update_distribution(self.organization, self.cf_id, False)\n cloudwatchevents.remove_targets(self)\n cloudwatchevents.delete_rule(self)\n sns.unsubscribe_all(self)\n sns.delete_topic(self)\n cloudfront._delete_cloudfront_distribution.delay(self.cf_id, account_id, video_id)"
}
] | from base64 import b64decode
from django.conf import settings
from django.http import Http404
from django.utils.safestring import mark_safe
from django.views.generic.base import TemplateView
from video.models import Media, LiveVideo
import re | 3,714 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
class EmbedView(TemplateView):
template_name = "player/index.html"
def validate_domain(self, channel_allowed_domains, referer_domain):
allowed_domains = settings.ALLOWED_DOMAINS + channel_allowed_domains
if len(channel_allowed_domains) == 0:
return True
for allowed_domain in allowed_domains:
secondary = allowed_domain
allowed_domain = re.escape(allowed_domain).replace('\\*', '[a-zA-Z0-9_-]+')
allowed_domain = re.compile(allowed_domain)
if allowed_domain.match(str(referer_domain)):
return True
return False
def get_context_data(self, **kwargs):
context = super(EmbedView, self).get_context_data(**kwargs)
poster_url, video, video_url, mime_type = self.get_video_data(kwargs.get('video_id'))
channel = video.channel
organization = video.organization
if not organization.traffic_enabled:
context['error'] = True
context['message'] = 'The content is not available.'
return context
referer = self.request.META.get('HTTP_REFERER')
referer_domain = None
if referer:
regex_domain = r'^(?:https?:\/\/)?(?:[^@\/\n]+@)?([^:\/?\n]+)'
referer_domain = re.match(regex_domain, referer).group(1)
adTagUrl = mark_safe(
video.ads_vast_url or channel.ads_vast_url or ''
) if video.enable_ads else mark_safe('')
if video.autoplay == 'c':
autoplay = channel.autoplay
else:
autoplay = video.autoplay == 'y'
if not autoplay:
autoplay = ''
if self.validate_domain(channel.allowed_domains, referer_domain):
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
class EmbedView(TemplateView):
template_name = "player/index.html"
def validate_domain(self, channel_allowed_domains, referer_domain):
allowed_domains = settings.ALLOWED_DOMAINS + channel_allowed_domains
if len(channel_allowed_domains) == 0:
return True
for allowed_domain in allowed_domains:
secondary = allowed_domain
allowed_domain = re.escape(allowed_domain).replace('\\*', '[a-zA-Z0-9_-]+')
allowed_domain = re.compile(allowed_domain)
if allowed_domain.match(str(referer_domain)):
return True
return False
def get_context_data(self, **kwargs):
context = super(EmbedView, self).get_context_data(**kwargs)
poster_url, video, video_url, mime_type = self.get_video_data(kwargs.get('video_id'))
channel = video.channel
organization = video.organization
if not organization.traffic_enabled:
context['error'] = True
context['message'] = 'The content is not available.'
return context
referer = self.request.META.get('HTTP_REFERER')
referer_domain = None
if referer:
regex_domain = r'^(?:https?:\/\/)?(?:[^@\/\n]+@)?([^:\/?\n]+)'
referer_domain = re.match(regex_domain, referer).group(1)
adTagUrl = mark_safe(
video.ads_vast_url or channel.ads_vast_url or ''
) if video.enable_ads else mark_safe('')
if video.autoplay == 'c':
autoplay = channel.autoplay
else:
autoplay = video.autoplay == 'y'
if not autoplay:
autoplay = ''
if self.validate_domain(channel.allowed_domains, referer_domain): | if video.state not in [LiveVideo.State.ON, Media.State.FINISHED]: | 1 | 2023-10-17 19:44:32+00:00 | 8k |
Qualcomm-AI-research/geometric-algebra-transformer | tests/gatr/layers/test_linear.py | [
{
"identifier": "EquiLinear",
"path": "gatr/layers/linear.py",
"snippet": "class EquiLinear(nn.Module):\n \"\"\"Pin-equivariant linear layer.\n\n The forward pass maps multivector inputs with shape (..., in_channels, 16) to multivector\n outputs with shape (..., out_channels, 16) as\n\n ```\n outputs[..., j, y] = sum_{i, b, x} weights[j, i, b] basis_map[b, x, y] inputs[..., i, x]\n ```\n\n plus an optional bias term for outputs[..., :, 0] (biases in other multivector components would\n break equivariance).\n\n Here basis_map are precomputed (see gatr.primitives.linear) and weights are the\n learnable weights of this layer.\n\n If there are auxiliary input scalars, they transform under a linear layer, and mix with the\n scalar components the multivector data. Note that in this layer (and only here) the auxiliary\n scalars are optional.\n\n This layer supports four initialization schemes:\n - \"default\": preserves (or actually slightly reducing) the variance of the data in\n the forward pass\n - \"small\": variance of outputs is approximately one order of magnitude smaller\n than for \"default\"\n - \"unit_scalar\": outputs will be close to (1, 0, 0, ..., 0)\n - \"almost_unit_scalar\": similar to \"unit_scalar\", but with more stochasticity\n\n Parameters\n ----------\n in_mv_channels : int\n Input multivector channels\n out_mv_channels : int\n Output multivector channels\n bias : bool\n Whether a bias term is added to the scalar component of the multivector outputs\n in_s_channels : int or None\n Input scalar channels. If None, no scalars are expected nor returned.\n out_s_channels : int or None\n Output scalar channels. If None, no scalars are expected nor returned.\n initialization : {\"default\", \"small\", \"unit_scalar\", \"almost_unit_scalar\"}\n Initialization scheme. For \"default\", initialize with the same philosophy as most\n networks do: preserve variance (approximately) in the forward pass. For \"small\",\n initalize the network such that the variance of the output data is approximately one\n order of magnitude smaller than that of the input data. For \"unit_scalar\", initialize\n the layer such that the output multivectors will be closer to (1, 0, 0, ..., 0).\n \"almost_unit_scalar\" is similar, but with more randomness.\n \"\"\"\n\n def __init__(\n self,\n in_mv_channels: int,\n out_mv_channels: int,\n in_s_channels: Optional[int] = None,\n out_s_channels: Optional[int] = None,\n bias: bool = True,\n initialization: str = \"default\",\n ) -> None:\n super().__init__()\n\n # Check inputs\n if initialization == \"unit_scalar\":\n assert bias, \"unit_scalar initialization requires bias\"\n if in_s_channels is None:\n raise NotImplementedError(\n \"unit_scalar initialization is currently only implemented for scalar inputs\"\n )\n\n self._in_mv_channels = in_mv_channels\n\n # MV -> MV\n self.weight = nn.Parameter(\n torch.empty((out_mv_channels, in_mv_channels, NUM_PIN_LINEAR_BASIS_ELEMENTS))\n )\n\n # We only need a separate bias here if that isn't already covered by the linear map from\n # scalar inputs\n self.bias = (\n nn.Parameter(torch.zeros((out_mv_channels, 1)))\n if bias and in_s_channels is None\n else None\n )\n\n # Scalars -> MV scalars\n self.s2mvs: Optional[nn.Linear]\n if in_s_channels:\n self.s2mvs = nn.Linear(in_s_channels, out_mv_channels, bias=bias)\n else:\n self.s2mvs = None\n\n # MV scalars -> scalars\n if out_s_channels:\n self.mvs2s = nn.Linear(in_mv_channels, out_s_channels, bias=bias)\n else:\n self.mvs2s = None\n\n # Scalars -> scalars\n if in_s_channels is not None and out_s_channels is not None:\n self.s2s = nn.Linear(\n in_s_channels, out_s_channels, bias=False\n ) # Bias would be duplicate\n else:\n self.s2s = None\n\n # Initialization\n self.reset_parameters(initialization)\n\n def forward(\n self, multivectors: torch.Tensor, scalars: Optional[torch.Tensor] = None\n ) -> Tuple[torch.Tensor, Union[torch.Tensor, None]]:\n \"\"\"Maps input multivectors and scalars using the most general equivariant linear map.\n\n The result is again multivectors and scalars.\n\n For multivectors we have:\n ```\n outputs[..., j, y] = sum_{i, b, x} weights[j, i, b] basis_map[b, x, y] inputs[..., i, x]\n = sum_i linear(inputs[..., i, :], weights[j, i, :])\n ```\n\n Here basis_map are precomputed (see gatr.primitives.linear) and weights are the\n learnable weights of this layer.\n\n Parameters\n ----------\n multivectors : torch.Tensor with shape (..., in_mv_channels, 16)\n Input multivectors\n scalars : None or torch.Tensor with shape (..., in_s_channels)\n Optional input scalars\n\n Returns\n -------\n outputs_mv : torch.Tensor with shape (..., out_mv_channels, 16)\n Output multivectors\n outputs_s : None or torch.Tensor with shape (..., out_s_channels)\n Output scalars, if scalars are provided. Otherwise None.\n \"\"\"\n\n outputs_mv = equi_linear(multivectors, self.weight) # (..., out_channels, 16)\n\n if self.bias is not None:\n bias = embed_scalar(self.bias)\n outputs_mv = outputs_mv + bias\n\n if self.s2mvs is not None and scalars is not None:\n outputs_mv[..., 0] += self.s2mvs(scalars)\n\n if self.mvs2s is not None:\n outputs_s = self.mvs2s(multivectors[..., 0])\n if self.s2s is not None and scalars is not None:\n outputs_s = outputs_s + self.s2s(scalars)\n else:\n outputs_s = None\n\n return outputs_mv, outputs_s\n\n def reset_parameters(\n self,\n initialization: str,\n gain: float = 1.0,\n additional_factor=1.0 / np.sqrt(3.0),\n use_mv_heuristics=True,\n ) -> None:\n \"\"\"Initializes the weights of the layer.\n\n Parameters\n ----------\n initialization : {\"default\", \"small\", \"unit_scalar\", \"almost_unit_scalar\"}\n Initialization scheme. For \"default\", initialize with the same philosophy as most\n networks do: preserve variance (approximately) in the forward pass. For \"small\",\n initalize the network such that the variance of the output data is approximately one\n order of magnitude smaller than that of the input data. For \"unit_scalar\", initialize\n the layer such that the output multivectors will be closer to (1, 0, 0, ..., 0).\n \"almost_unit_scalar\" is similar, but with more randomness.\n gain : float\n Gain factor for the activations. Should be 1.0 if previous layer has no activation,\n sqrt(2) if it has a ReLU activation, and so on. Can be computed with\n `torch.nn.init.calculate_gain()`.\n additional_factor : float\n Empirically, it has been found that slightly *decreasing* the data variance at each\n layer gives a better performance. In particular, the PyTorch default initialization uses\n an additional factor of 1/sqrt(3) (cancelling the factor of sqrt(3) that naturally\n arises when computing the bounds of a uniform initialization). A discussion of this was\n (to the best of our knowledge) never published, but see\n https://github.com/pytorch/pytorch/issues/57109 and\n https://soumith.ch/files/20141213_gplus_nninit_discussion.htm.\n use_mv_heuristics : bool\n Multivector components are differently affected by the equivariance constraint. If\n `use_mv_heuristics` is set to True, we initialize the weights for each output\n multivector component differently, with factors determined empirically to preserve the\n variance of each multivector component in the forward pass.\n \"\"\"\n\n # Prefactors depending on initialization scheme\n mv_component_factors, mv_factor, mvs_bias_shift, s_factor = self._compute_init_factors(\n initialization, gain, additional_factor, use_mv_heuristics\n )\n\n # Following He et al, 1502.01852, we aim to preserve the variance in the forward pass.\n # A sufficient criterion for this is that the variance of the weights is given by\n # `Var[w] = gain^2 / fan`.\n # Here `gain^2` is 2 if the previous layer has a ReLU nonlinearity, 1 for the initial layer,\n # and some other value in other situations (we may not care about this too much).\n # More importantly, `fan` is the number of connections: the number of input elements that\n # get summed over to compute each output element.\n\n # Let us fist consider the multivector outputs.\n self._init_multivectors(mv_component_factors, mv_factor, mvs_bias_shift)\n\n # Then let's consider the maps to scalars.\n self._init_scalars(s_factor)\n\n @staticmethod\n def _compute_init_factors(initialization, gain, additional_factor, use_mv_heuristics):\n \"\"\"Computes prefactors for the initialization.\n\n See self.reset_parameters().\n \"\"\"\n\n if initialization not in {\"default\", \"small\", \"unit_scalar\", \"almost_unit_scalar\"}:\n raise ValueError(f\"Unknown initialization scheme {initialization}\")\n\n if initialization == \"default\":\n mv_factor = gain * additional_factor * np.sqrt(3)\n s_factor = gain * additional_factor * np.sqrt(3)\n mvs_bias_shift = 0.0\n elif initialization == \"small\":\n # Change scale by a factor of 0.3 in this layer\n mv_factor = 0.1 * gain * additional_factor * np.sqrt(3)\n s_factor = 0.1 * gain * additional_factor * np.sqrt(3)\n mvs_bias_shift = 0.0\n elif initialization == \"unit_scalar\":\n # Change scale by a factor of 0.3 for MV outputs, and initialize bias around 1\n mv_factor = 0.1 * gain * additional_factor * np.sqrt(3)\n s_factor = gain * additional_factor * np.sqrt(3)\n mvs_bias_shift = 1.0\n elif initialization == \"almost_unit_scalar\":\n # Change scale by a factor of 0.3 for MV outputs, and initialize bias around 1\n mv_factor = 0.5 * gain * additional_factor * np.sqrt(3)\n s_factor = gain * additional_factor * np.sqrt(3)\n mvs_bias_shift = 1.0\n else:\n raise ValueError(\n f\"Unknown initialization scheme {initialization}, expected\"\n ' \"default\", \"small\", or \"unit_scalar\".'\n )\n\n # Individual factors for each multivector component\n if use_mv_heuristics:\n # Without corrections, the variance of standard normal inputs after a forward pass\n # through this layer is different for each output grade. The reason is that the\n # equivariance constraints affect different grades differently.\n # We heuristically correct for this by initializing the weights for different basis\n # elements differently, using the following additional factors on the weight bound:\n # mv_component_factors = torch.sqrt(torch.Tensor([0.5, 4.0, 6.0, 4.0, 1.0, 0.5, 0.5]))\n mv_component_factors = torch.sqrt(\n torch.Tensor([1.0, 4.0, 6.0, 2.0, 0.5, 0.5, 1.5, 1.5, 0.5])\n )\n else:\n mv_component_factors = torch.ones(NUM_PIN_LINEAR_BASIS_ELEMENTS)\n return mv_component_factors, mv_factor, mvs_bias_shift, s_factor\n\n def _init_multivectors(self, mv_component_factors, mv_factor, mvs_bias_shift):\n \"\"\"Weight initialization for maps to multivector outputs.\"\"\"\n\n # We have\n # `outputs[..., j, y] = sum_{i, b, x} weights[j, i, b] basis_map[b, x, y] inputs[..., i, x]`\n # The basis maps are more or less grade projections, summing over all basis elements\n # corresponds to (almost) an identity map in the GA space. The sum over `b` and `x` thus\n # does not contribute to `fan` substantially. (We may add a small ad-hoc factor later to\n # make up for this approximation.) However, there is still the sum over incoming channels,\n # and thus `fan ~ mv_in_channels`. Assuming (for now) that the previous layer contained a\n # ReLU activation, we finally have the condition `Var[w] = 2 / mv_in_channels`.\n # Since the variance of a uniform distribution between -a and a is given by\n # `Var[Uniform(-a, a)] = a^2/3`, we should set `a = gain * sqrt(3 / mv_in_channels)`.\n # In theory (see docstring).\n fan_in = self._in_mv_channels\n bound = mv_factor / np.sqrt(fan_in)\n for i, factor in enumerate(mv_component_factors):\n nn.init.uniform_(self.weight[..., i], a=-factor * bound, b=factor * bound)\n\n # Now let's focus on the scalar components of the multivector outputs.\n # If there are only multivector inputs, all is good. But if scalar inputs contribute them as\n # well, they contribute to the output variance as well.\n # In this case, we initialize such that the multivector inputs and the scalar inputs each\n # contribute half to the output variance.\n # We can achieve this by inspecting the basis maps and seeing that only basis element 0\n # contributes to the scalar output. Thus, we can reduce the variance of the correponding\n # weights to give a variance of 0.5, not 1.\n if self.s2mvs is not None:\n bound = mv_component_factors[0] * mv_factor / np.sqrt(fan_in) / np.sqrt(2)\n nn.init.uniform_(self.weight[..., [0]], a=-bound, b=bound)\n\n # The same holds for the scalar-to-MV map, where we also just want a variance of 0.5.\n if self.s2mvs is not None:\n fan_in, _ = nn.init._calculate_fan_in_and_fan_out(\n self.s2mvs.weight\n ) # pylint:disable=protected-access\n fan_in = max(fan_in, 1) # Since in theory we could have 0-channel scalar \"data\"\n bound = mv_component_factors[0] * mv_factor / np.sqrt(fan_in) / np.sqrt(2)\n nn.init.uniform_(self.s2mvs.weight, a=-bound, b=bound)\n\n # Bias needs to be adapted, as the overall fan in is different (need to account for MV\n # and s inputs) and we may need to account for the unit_scalar initialization scheme\n if self.s2mvs.bias is not None:\n fan_in = (\n nn.init._calculate_fan_in_and_fan_out(self.s2mvs.weight)[0]\n + self._in_mv_channels\n )\n bound = mv_component_factors[0] / np.sqrt(fan_in) if fan_in > 0 else 0\n nn.init.uniform_(self.s2mvs.bias, mvs_bias_shift - bound, mvs_bias_shift + bound)\n\n def _init_scalars(self, s_factor):\n \"\"\"Weight initialization for maps to multivector outputs.\"\"\"\n\n # If both exist, we need to account for overcounting again, and assign each a target a\n # variance of 0.5.\n models = []\n if self.s2s:\n models.append(self.s2s)\n if self.mvs2s:\n models.append(self.mvs2s)\n for model in models:\n fan_in, _ = nn.init._calculate_fan_in_and_fan_out(\n model.weight\n ) # pylint:disable=protected-access\n fan_in = max(fan_in, 1) # Since in theory we could have 0-channel scalar \"data\"\n bound = s_factor / np.sqrt(fan_in) / np.sqrt(len(models))\n nn.init.uniform_(model.weight, a=-bound, b=bound)\n # Bias needs to be adapted, as the overall fan in is different (need to account for MV and\n # s inputs)\n if self.mvs2s and self.mvs2s.bias is not None:\n fan_in = nn.init._calculate_fan_in_and_fan_out(self.mvs2s.weight)[\n 0\n ] # pylint:disable=protected-access\n if self.s2s:\n fan_in += nn.init._calculate_fan_in_and_fan_out(self.s2s.weight)[\n 0\n ] # pylint:disable=protected-access\n bound = s_factor / np.sqrt(fan_in) if fan_in > 0 else 0\n nn.init.uniform_(self.mvs2s.bias, -bound, bound)"
},
{
"identifier": "BATCH_DIMS",
"path": "tests/helpers/constants.py",
"snippet": "BATCH_DIMS = [(7, 9), tuple()]"
},
{
"identifier": "TOLERANCES",
"path": "tests/helpers/constants.py",
"snippet": "TOLERANCES = dict(atol=1e-3, rtol=1e-4)"
},
{
"identifier": "check_pin_equivariance",
"path": "tests/helpers/equivariance.py",
"snippet": "def check_pin_equivariance(\n function,\n num_multivector_args=1,\n fn_kwargs=None,\n batch_dims=(1,),\n spin=False,\n rng=None,\n num_checks=3,\n **kwargs,\n):\n \"\"\"Checks whether a callable is equivariant with respect to the Pin(3,0,1) or Spin(3,0,1) group.\n\n The callable can have an arbitray number of multivector inputs.\n\n Parameters\n ----------\n function: Callable\n Function to be tested for equivariance. The first `num_multivector_args` positional\n arguments need to accept torch.Tensor inputs describing multivectors, and will be\n transformed as part of the equivariance test.\n num_multivector_args: int\n Number of multivector that `function` accepts.\n fn_kwargs : dict with str keys\n Keyword arguments to call `function` with.\n batch_dims : tuple of int\n Batch shape for the multivector inputs to `function`.\n spin : bool\n If True, this function tests Spin equivariance; if False, it tests Pin equivariance.\n rng : numpy.random.Generator or None\n Numpy rng to draw the inputs and transformations from.\n num_checks : int\n Number of function calls (with random inputs) to determine whether the function passes the\n equivariance test.\n kwargs\n Optional keyword arguments for the equality check. Will be passed on to np.allclose.\n This can for instance be used to specify the absolute and relative tolerance\n (by passing `atol` and `rtol` keyword arguments).\n \"\"\"\n # Default arguments\n if fn_kwargs is None:\n fn_kwargs = {}\n\n # Propagate numpy random state to torch\n if rng is not None:\n torch.manual_seed(rng.integers(100000))\n\n # Loop over multiple checks\n for _ in range(num_checks):\n # Generate function inputs and Pin(3,0,1) transformations\n inputs = torch.randn(num_multivector_args, *batch_dims, 16)\n transform = SlowRandomPinTransform(rng=rng, spin=spin)\n\n # First function, then transformation\n outputs = get_first_output(function(*inputs, **fn_kwargs))\n transformed_outputs = transform(outputs)\n\n # First transformation, then function\n transformed_inputs = transform(inputs)\n outputs_of_transformed = get_first_output(function(*transformed_inputs, **fn_kwargs))\n\n # Check equality\n torch.testing.assert_close(transformed_outputs, outputs_of_transformed, **kwargs)"
}
] | import pytest
import torch
from gatr.layers.linear import EquiLinear
from tests.helpers import BATCH_DIMS, TOLERANCES, check_pin_equivariance | 6,511 | out_s_channels=out_s_channels,
initialization=initialization,
)
# Some initialization schemes ar enot implemented when data is all-scalar. That's fine.
except NotImplementedError as exc:
print(exc)
return
# Inputs
inputs_mv = torch.randn(*batch_dims, in_mv_channels, 16)
inputs_s = torch.randn(*batch_dims, in_s_channels) if in_s_channels is not None else None
# Compute outputs
outputs_mv, outputs_s = layer(inputs_mv, scalars=inputs_s)
# Compute mean and variance of MV outputs
mv_mean = outputs_mv[...].cpu().detach().to(torch.float64).mean(dim=(0, 1))
mv_var = outputs_mv[...].cpu().detach().to(torch.float64).var(dim=(0, 1))
print("Output multivector means and std by components:")
for i, (mean_, var_) in enumerate(zip(mv_mean, mv_var)):
print(f" Component {i}: mean = {mean_:.2f}, std = {var_**0.5:.2f}")
# Check that the mean and variance agree with expectations
if initialization == "default":
target_mean = torch.zeros_like(mv_mean)
target_var = torch.ones_like(mv_var) / 3.0 # Factor 3 comes from heuristics
elif initialization == "small":
target_mean = torch.zeros_like(mv_mean)
target_var = 0.01 * torch.ones_like(mv_var) / 3.0
elif initialization == "unit_scalar":
target_mean = torch.zeros_like(mv_mean)
target_mean[0] = 1.0
target_var = 0.01 * torch.ones_like(mv_var) / 3.0
else:
raise ValueError(initialization)
assert torch.all(mv_mean > target_mean - 0.3)
assert torch.all(mv_mean < target_mean + 0.3)
assert torch.all(mv_var > target_var / var_tolerance)
assert torch.all(mv_var < target_var * var_tolerance)
# Same for scalar outputs
if out_s_channels is not None:
s_mean = outputs_s[...].cpu().detach().to(torch.float64).mean().item()
s_var = outputs_s[...].cpu().detach().to(torch.float64).var().item()
print(f"Output scalar: mean = {s_mean:.2f}, std = {s_var**0.5:.2f}")
assert -0.3 < s_mean < 0.3
if initialization in {"default", "unit_scalar"}:
assert 1.0 / 3.0 / var_tolerance < s_var < 1.0 / 3.0 * var_tolerance
else:
assert 0.01 / 3.0 / var_tolerance < s_var < 0.01 / 3.0 * var_tolerance
@pytest.mark.parametrize("rescaling", [0.0, -2.0, 100.0])
@pytest.mark.parametrize("batch_dims", BATCH_DIMS)
@pytest.mark.parametrize("in_mv_channels", [9, 1])
@pytest.mark.parametrize("out_mv_channels", [7, 1])
@pytest.mark.parametrize("in_s_channels", [None, 3])
@pytest.mark.parametrize("out_s_channels", [None, 4])
def test_linear_layer_linearity(
batch_dims, in_mv_channels, out_mv_channels, in_s_channels, out_s_channels, rescaling
):
"""Tests that the EquiLinear layer indeed describes a linear map (when the bias is deactivated).
Checks that `f(x + rescaling * y) = f(x) + rescaling * f(y)` for random inputs `x`, `y` and
linear layer `f(x)`.
"""
layer = EquiLinear(
in_mv_channels,
out_mv_channels,
in_s_channels=in_s_channels,
out_s_channels=out_s_channels,
bias=False,
)
# Inputs
x_mv = torch.randn(*batch_dims, in_mv_channels, 16)
y_mv = torch.randn(*batch_dims, in_mv_channels, 16)
xy_mv = x_mv + rescaling * y_mv
if in_s_channels:
x_s = torch.randn(*batch_dims, in_s_channels)
y_s = torch.randn(*batch_dims, in_s_channels)
xy_s = x_s + rescaling * y_s
else:
x_s, y_s, xy_s = None, None, None
# Compute outputs
o_xy_mv, o_xy_s = layer(xy_mv, scalars=xy_s)
o_x_mv, o_x_s = layer(x_mv, scalars=x_s)
o_y_mv, o_y_s = layer(y_mv, scalars=y_s)
# Check equality
torch.testing.assert_close(o_xy_mv, o_x_mv + rescaling * o_y_mv, **TOLERANCES)
if out_s_channels is not None:
torch.testing.assert_close(o_xy_s, o_x_s + rescaling * o_y_s, **TOLERANCES)
@pytest.mark.parametrize("batch_dims", BATCH_DIMS)
@pytest.mark.parametrize("in_mv_channels", [9, 1])
@pytest.mark.parametrize("out_mv_channels", [7, 1])
@pytest.mark.parametrize("bias", [False, True])
@pytest.mark.parametrize("in_s_channels", [None, 3])
@pytest.mark.parametrize("out_s_channels", [None, 4])
def test_linear_layer_equivariance(
batch_dims, in_mv_channels, out_mv_channels, in_s_channels, out_s_channels, bias
):
"""Tests the equi_linear() primitive for equivariance."""
layer = EquiLinear(
in_mv_channels,
out_mv_channels,
in_s_channels=in_s_channels,
out_s_channels=out_s_channels,
bias=bias,
)
data_dims = tuple(list(batch_dims) + [in_mv_channels])
scalars = None if in_s_channels is None else torch.randn(*batch_dims, in_s_channels)
| # Copyright (c) 2023 Qualcomm Technologies, Inc.
# All rights reserved.
@pytest.mark.parametrize("batch_dims", [(100,)])
@pytest.mark.parametrize("in_mv_channels, out_mv_channels", [(200, 5), (16, 16), (5, 200)])
@pytest.mark.parametrize(
"in_s_channels, out_s_channels", [(None, None), (None, 100), (100, None), (32, 32)]
)
@pytest.mark.parametrize("initialization", ["default", "small", "unit_scalar"])
def test_linear_layer_initialization(
initialization,
batch_dims,
in_mv_channels,
out_mv_channels,
in_s_channels,
out_s_channels,
var_tolerance=10.0,
):
"""Tests the initialization of `EquiLinear`.
The goal is that independent of the channel size, inputs with variance 1 are mapped to outputs
with, very roughly, variance 1.
"""
# Create layer
try:
layer = EquiLinear(
in_mv_channels,
out_mv_channels,
in_s_channels=in_s_channels,
out_s_channels=out_s_channels,
initialization=initialization,
)
# Some initialization schemes ar enot implemented when data is all-scalar. That's fine.
except NotImplementedError as exc:
print(exc)
return
# Inputs
inputs_mv = torch.randn(*batch_dims, in_mv_channels, 16)
inputs_s = torch.randn(*batch_dims, in_s_channels) if in_s_channels is not None else None
# Compute outputs
outputs_mv, outputs_s = layer(inputs_mv, scalars=inputs_s)
# Compute mean and variance of MV outputs
mv_mean = outputs_mv[...].cpu().detach().to(torch.float64).mean(dim=(0, 1))
mv_var = outputs_mv[...].cpu().detach().to(torch.float64).var(dim=(0, 1))
print("Output multivector means and std by components:")
for i, (mean_, var_) in enumerate(zip(mv_mean, mv_var)):
print(f" Component {i}: mean = {mean_:.2f}, std = {var_**0.5:.2f}")
# Check that the mean and variance agree with expectations
if initialization == "default":
target_mean = torch.zeros_like(mv_mean)
target_var = torch.ones_like(mv_var) / 3.0 # Factor 3 comes from heuristics
elif initialization == "small":
target_mean = torch.zeros_like(mv_mean)
target_var = 0.01 * torch.ones_like(mv_var) / 3.0
elif initialization == "unit_scalar":
target_mean = torch.zeros_like(mv_mean)
target_mean[0] = 1.0
target_var = 0.01 * torch.ones_like(mv_var) / 3.0
else:
raise ValueError(initialization)
assert torch.all(mv_mean > target_mean - 0.3)
assert torch.all(mv_mean < target_mean + 0.3)
assert torch.all(mv_var > target_var / var_tolerance)
assert torch.all(mv_var < target_var * var_tolerance)
# Same for scalar outputs
if out_s_channels is not None:
s_mean = outputs_s[...].cpu().detach().to(torch.float64).mean().item()
s_var = outputs_s[...].cpu().detach().to(torch.float64).var().item()
print(f"Output scalar: mean = {s_mean:.2f}, std = {s_var**0.5:.2f}")
assert -0.3 < s_mean < 0.3
if initialization in {"default", "unit_scalar"}:
assert 1.0 / 3.0 / var_tolerance < s_var < 1.0 / 3.0 * var_tolerance
else:
assert 0.01 / 3.0 / var_tolerance < s_var < 0.01 / 3.0 * var_tolerance
@pytest.mark.parametrize("rescaling", [0.0, -2.0, 100.0])
@pytest.mark.parametrize("batch_dims", BATCH_DIMS)
@pytest.mark.parametrize("in_mv_channels", [9, 1])
@pytest.mark.parametrize("out_mv_channels", [7, 1])
@pytest.mark.parametrize("in_s_channels", [None, 3])
@pytest.mark.parametrize("out_s_channels", [None, 4])
def test_linear_layer_linearity(
batch_dims, in_mv_channels, out_mv_channels, in_s_channels, out_s_channels, rescaling
):
"""Tests that the EquiLinear layer indeed describes a linear map (when the bias is deactivated).
Checks that `f(x + rescaling * y) = f(x) + rescaling * f(y)` for random inputs `x`, `y` and
linear layer `f(x)`.
"""
layer = EquiLinear(
in_mv_channels,
out_mv_channels,
in_s_channels=in_s_channels,
out_s_channels=out_s_channels,
bias=False,
)
# Inputs
x_mv = torch.randn(*batch_dims, in_mv_channels, 16)
y_mv = torch.randn(*batch_dims, in_mv_channels, 16)
xy_mv = x_mv + rescaling * y_mv
if in_s_channels:
x_s = torch.randn(*batch_dims, in_s_channels)
y_s = torch.randn(*batch_dims, in_s_channels)
xy_s = x_s + rescaling * y_s
else:
x_s, y_s, xy_s = None, None, None
# Compute outputs
o_xy_mv, o_xy_s = layer(xy_mv, scalars=xy_s)
o_x_mv, o_x_s = layer(x_mv, scalars=x_s)
o_y_mv, o_y_s = layer(y_mv, scalars=y_s)
# Check equality
torch.testing.assert_close(o_xy_mv, o_x_mv + rescaling * o_y_mv, **TOLERANCES)
if out_s_channels is not None:
torch.testing.assert_close(o_xy_s, o_x_s + rescaling * o_y_s, **TOLERANCES)
@pytest.mark.parametrize("batch_dims", BATCH_DIMS)
@pytest.mark.parametrize("in_mv_channels", [9, 1])
@pytest.mark.parametrize("out_mv_channels", [7, 1])
@pytest.mark.parametrize("bias", [False, True])
@pytest.mark.parametrize("in_s_channels", [None, 3])
@pytest.mark.parametrize("out_s_channels", [None, 4])
def test_linear_layer_equivariance(
batch_dims, in_mv_channels, out_mv_channels, in_s_channels, out_s_channels, bias
):
"""Tests the equi_linear() primitive for equivariance."""
layer = EquiLinear(
in_mv_channels,
out_mv_channels,
in_s_channels=in_s_channels,
out_s_channels=out_s_channels,
bias=bias,
)
data_dims = tuple(list(batch_dims) + [in_mv_channels])
scalars = None if in_s_channels is None else torch.randn(*batch_dims, in_s_channels) | check_pin_equivariance( | 3 | 2023-10-23 15:58:36+00:00 | 8k |
StanislavPetrovV/Wolfenstein-3D-Clone | engine.py | [
{
"identifier": "Player",
"path": "player.py",
"snippet": "class Player(Camera):\n def __init__(self, eng, position=PLAYER_POS, yaw=0, pitch=0):\n self.app = eng.app\n self.eng = eng\n self.sound = eng.sound\n self.play = eng.sound.play\n super().__init__(position, yaw, pitch)\n\n # these maps will update when instantiated LevelMap\n self.door_map, self.wall_map, self.item_map = None, None, None\n\n # attribs\n self.health = self.eng.player_attribs.health\n self.ammo = self.eng.player_attribs.ammo\n #\n self.tile_pos: Tuple[int, int] = None\n\n # weapon\n self.weapons = self.eng.player_attribs.weapons\n self.weapon_id = self.eng.player_attribs.weapon_id\n self.weapon_cycle = cycle(self.eng.player_attribs.weapons.keys())\n #\n self.is_shot = False\n #\n self.key = None\n\n def handle_events(self, event):\n if event.type == pg.KEYDOWN:\n # door interaction\n if event.key == KEYS['INTERACT']:\n self.interact_with_door()\n\n # switch weapon by keys\n if event.key == KEYS['WEAPON_1']:\n self.switch_weapon(weapon_id=ID.KNIFE_0)\n elif event.key == KEYS['WEAPON_2']:\n self.switch_weapon(weapon_id=ID.PISTOL_0)\n elif event.key == KEYS['WEAPON_3']:\n self.switch_weapon(weapon_id=ID.RIFLE_0)\n\n # weapon by mouse wheel\n if event.type == pg.MOUSEWHEEL:\n weapon_id = next(self.weapon_cycle)\n if self.weapons[weapon_id]:\n self.switch_weapon(weapon_id=weapon_id)\n\n # shooting\n if event.type == pg.MOUSEBUTTONDOWN:\n if event.button == 1:\n self.do_shot()\n\n def update(self):\n self.mouse_control()\n self.keyboard_control()\n super().update()\n #\n self.check_health()\n self.update_tile_position()\n self.pick_up_item()\n\n def check_health(self):\n if self.health <= 0:\n self.play(self.sound.player_death)\n #\n pg.time.wait(2000)\n self.eng.player_attribs = PlayerAttribs()\n self.eng.new_game()\n\n def check_hit_on_npc(self):\n if WEAPON_SETTINGS[self.weapon_id]['miss_probability'] > random.random():\n return None\n\n if npc_pos := self.eng.ray_casting.run(\n start_pos=self.position,\n direction=self.forward,\n max_dist=WEAPON_SETTINGS[self.weapon_id]['max_dist'],\n npc_to_player_flag=False\n ):\n npc = self.eng.level_map.npc_map[npc_pos]\n npc.get_damage()\n\n def switch_weapon(self, weapon_id):\n if self.weapons[weapon_id]:\n self.weapon_instance.weapon_id = self.weapon_id = weapon_id\n\n def do_shot(self):\n if self.weapon_id == ID.KNIFE_0:\n self.is_shot = True\n self.check_hit_on_npc()\n #\n self.play(self.sound.player_attack[ID.KNIFE_0])\n\n elif self.ammo:\n consumption = WEAPON_SETTINGS[self.weapon_id]['ammo_consumption']\n if not self.is_shot and self.ammo >= consumption:\n self.is_shot = True\n self.check_hit_on_npc()\n #\n self.ammo -= consumption\n self.ammo = max(0, self.ammo)\n #\n self.play(self.sound.player_attack[self.weapon_id])\n\n def update_tile_position(self):\n self.tile_pos = int(self.position.x), int(self.position.z)\n\n def pick_up_item(self):\n if self.tile_pos not in self.item_map:\n return None\n\n item = self.item_map[self.tile_pos]\n #\n if item.tex_id == ID.MED_KIT:\n self.health += ITEM_SETTINGS[ID.MED_KIT]['value']\n self.health = min(self.health, MAX_HEALTH_VALUE)\n #\n elif item.tex_id == ID.AMMO:\n self.ammo += ITEM_SETTINGS[ID.AMMO]['value']\n self.ammo = min(self.ammo, MAX_AMMO_VALUE)\n #\n elif item.tex_id == ID.PISTOL_ICON:\n if not self.weapons[ID.PISTOL_0]:\n self.weapons[ID.PISTOL_0] = 1\n self.switch_weapon(weapon_id=ID.PISTOL_0)\n #\n elif item.tex_id == ID.RIFLE_ICON:\n if not self.weapons[ID.RIFLE_0]:\n self.weapons[ID.RIFLE_0] = 1\n self.switch_weapon(weapon_id=ID.RIFLE_0)\n #\n elif item.tex_id == ID.KEY:\n self.key = 1\n #\n self.play(self.sound.pick_up[item.tex_id])\n #\n del self.item_map[self.tile_pos]\n\n def interact_with_door(self):\n pos = self.position + self.forward\n int_pos = int(pos.x), int(pos.z)\n\n if int_pos not in self.door_map:\n return None\n\n door = self.door_map[int_pos]\n #\n if self.key and door.tex_id == ID.KEY_DOOR:\n #\n door.is_closed = not door.is_closed\n self.play(self.sound.player_missed)\n # next level\n pg.time.wait(300)\n #\n self.eng.player_attribs.update(player=self)\n self.eng.player_attribs.num_level += 1\n self.eng.player_attribs.num_level %= NUM_LEVELS\n self.eng.new_game()\n else:\n door.is_moving = True\n self.play(self.sound.open_door)\n\n def mouse_control(self):\n mouse_dx, mouse_dy = pg.mouse.get_rel()\n if mouse_dx:\n self.rotate_yaw(delta_x=mouse_dx * MOUSE_SENSITIVITY)\n if mouse_dy:\n self.rotate_pitch(delta_y=mouse_dy * MOUSE_SENSITIVITY)\n\n def keyboard_control(self):\n key_state = pg.key.get_pressed()\n vel = PLAYER_SPEED * self.app.delta_time\n next_step = glm.vec2()\n #\n if key_state[KEYS['FORWARD']]:\n next_step += self.move_forward(vel)\n if key_state[KEYS['BACK']]:\n next_step += self.move_back(vel)\n if key_state[KEYS['STRAFE_R']]:\n next_step += self.move_right(vel)\n if key_state[KEYS['STRAFE_L']]:\n next_step += self.move_left(vel)\n #\n self.move(next_step=next_step)\n\n def move(self, next_step):\n if not self.is_collide(dx=next_step[0]):\n self.position.x += next_step[0]\n\n if not self.is_collide(dz=next_step[1]):\n self.position.z += next_step[1]\n\n def is_collide(self, dx=0, dz=0):\n int_pos = (\n int(self.position.x + dx + (\n PLAYER_SIZE if dx > 0 else -PLAYER_SIZE if dx < 0 else 0)\n ),\n int(self.position.z + dz + (\n PLAYER_SIZE if dz > 0 else -PLAYER_SIZE if dz < 0 else 0)\n )\n )\n # check doors\n if int_pos in self.door_map:\n return self.door_map[int_pos].is_closed\n # check walls\n return int_pos in self.wall_map"
},
{
"identifier": "PlayerAttribs",
"path": "player.py",
"snippet": "class PlayerAttribs:\n def __init__(self):\n self.health = PLAYER_INIT_HEALTH\n self.ammo = PLAYER_INIT_AMMO\n self.weapons = {ID.KNIFE_0: 1, ID.PISTOL_0: 0, ID.RIFLE_0: 0}\n self.weapon_id = ID.KNIFE_0\n self.num_level = 0\n\n def update(self, player):\n self.health = player.health\n self.ammo = player.ammo\n self.weapons = player.weapons\n self.weapon_id = player.weapon_id"
},
{
"identifier": "Scene",
"path": "scene.py",
"snippet": "class Scene:\n def __init__(self, eng):\n self.eng = eng\n self.level_mesh = LevelMesh(eng)\n\n self.hud = HUD(eng)\n self.doors = self.eng.level_map.door_map.values()\n self.items = self.eng.level_map.item_map.values()\n self.npc = self.eng.level_map.npc_map.values()\n self.weapon = Weapon(eng)\n\n self.instanced_door_mesh = InstancedQuadMesh(\n eng, self.doors, eng.shader_program.instanced_door\n )\n self.instanced_item_mesh = InstancedQuadMesh(\n eng, self.items, eng.shader_program.instanced_billboard\n )\n self.instanced_hud_mesh = InstancedQuadMesh(\n eng, self.hud.objects, eng.shader_program.instanced_hud\n )\n self.instanced_npc_mesh = InstancedQuadMesh(\n eng, self.npc, eng.shader_program.instanced_billboard\n )\n self.weapon_mesh = WeaponMesh(eng, eng.shader_program.weapon, self.weapon)\n\n def update(self):\n for door in self.doors:\n door.update()\n for npc in self.npc:\n npc.update()\n self.hud.update()\n self.weapon.update()\n\n def render(self):\n # level\n self.level_mesh.render()\n # doors\n self.instanced_door_mesh.render()\n # items\n self.instanced_item_mesh.render()\n # hud\n self.instanced_hud_mesh.render()\n # npc\n self.instanced_npc_mesh.render()\n # weapon\n self.weapon_mesh.render()"
},
{
"identifier": "ShaderProgram",
"path": "shader_program.py",
"snippet": "class ShaderProgram:\n def __init__(self, eng):\n self.eng = eng\n self.ctx = eng.ctx\n self.player = eng.player\n\n # -------- shaders -------- #\n self.level = self.get_program(shader_name='level')\n self.instanced_door = self.get_program(shader_name='instanced_door')\n self.instanced_billboard = self.get_program(shader_name='instanced_billboard')\n self.instanced_hud = self.get_program(shader_name='instanced_hud')\n self.weapon = self.get_program(shader_name='weapon')\n # ------------------------- #\n self.set_uniforms_on_init()\n\n def set_uniforms_on_init(self):\n # level\n self.level['m_proj'].write(self.player.m_proj)\n self.level['u_texture_array_0'] = TEXTURE_UNIT_0\n\n # instanced door\n self.instanced_door['m_proj'].write(self.player.m_proj)\n self.instanced_door['u_texture_array_0'] = TEXTURE_UNIT_0\n\n # billboard\n self.instanced_billboard['m_proj'].write(self.player.m_proj)\n self.instanced_billboard['u_texture_array_0'] = TEXTURE_UNIT_0\n\n # hud\n self.instanced_hud['u_texture_array_0'] = TEXTURE_UNIT_0\n\n # weapon\n self.weapon['u_texture_array_0'] = TEXTURE_UNIT_0\n\n def update(self):\n self.level['m_view'].write(self.player.m_view)\n self.instanced_door['m_view'].write(self.player.m_view)\n self.instanced_billboard['m_view'].write(self.player.m_view)\n\n def get_program(self, shader_name):\n with open(f'shaders/{shader_name}.vert') as file:\n vertex_shader = file.read()\n\n with open(f'shaders/{shader_name}.frag') as file:\n fragment_shader = file.read()\n\n program = self.ctx.program(vertex_shader=vertex_shader, fragment_shader=fragment_shader)\n return program"
},
{
"identifier": "PathFinder",
"path": "path_finding.py",
"snippet": "class PathFinder:\n def __init__(self, eng):\n self.eng = eng\n self.level_map = eng.level_map\n self.wall_map = eng.level_map.wall_map\n self.ways = ([-1, 0], [0, -1], [1, 0], [0, 1], [-1, -1], [1, -1], [1, 1], [-1, 1])\n self.graph = {}\n self.update_graph()\n\n @lru_cache\n def find(self, start_pos, end_pos):\n visited = self.bfs(start_pos, end_pos)\n path = [end_pos]\n step = visited.get(end_pos, start_pos)\n\n while step and step != start_pos:\n path.append(step)\n step = visited[step]\n return path[-1]\n\n def bfs(self, start, goal):\n queue = deque([start])\n visited = {start: None}\n\n while queue:\n cur_node = queue.popleft()\n if cur_node == goal:\n break\n next_nodes = self.graph[cur_node]\n\n for next_node in next_nodes:\n if next_node not in visited and next_node not in self.eng.level_map.npc_map:\n queue.append(next_node)\n visited[next_node] = cur_node\n return visited\n\n def get_next_nodes(self, x, y):\n return [\n (x + dx, y + dy) for dx, dy in self.ways if (x + dx, y + dy) not in self.wall_map\n ]\n\n def update_graph(self):\n for y in range(self.level_map.depth):\n for x in range(self.level_map.width):\n self.graph[(x, y)] = self.graph.get((x, y), []) + self.get_next_nodes(x, y)"
},
{
"identifier": "RayCasting",
"path": "ray_casting.py",
"snippet": "class RayCasting:\n def __init__(self, eng):\n self.eng = eng\n self.level_map = eng.level_map\n self.wall_map = eng.level_map.wall_map\n self.door_map = eng.level_map.door_map\n self.player = eng.player\n\n @staticmethod\n def get_init_data(pos1, pos2):\n d_ = glm.sign(pos2 - pos1)\n #\n delta_ = min(d_ / (pos2 - pos1), 10000000.0) if d_ != 0 else 10000000.0\n #\n max_ = delta_ * (1.0 - glm.fract(pos1)) if d_ > 0 else delta_ * glm.fract(pos1)\n return d_, delta_, max_\n\n def run(self, start_pos, direction, max_dist=MAX_RAY_DIST, npc_to_player_flag=True):\n #\n x1, y1, z1 = start_pos # start point\n x2, y2, z2 = start_pos + direction * max_dist # end point\n cur_voxel_pos = glm.ivec3(x1, y1, z1)\n\n # init ray casting\n dx, delta_x, max_x = self.get_init_data(x1, x2)\n dy, delta_y, max_y = self.get_init_data(y1, y2)\n dz, delta_z, max_z = self.get_init_data(z1, z2)\n\n while not (max_x > 1.0 and max_y > 1.0 and max_z > 1.0):\n #\n cur_tile_pos = (cur_voxel_pos.x, cur_voxel_pos.z)\n\n # ----------------------------------------------\n # check walls\n if cur_tile_pos in self.wall_map:\n return False\n # check closed doors\n if cur_tile_pos in self.door_map:\n if self.door_map[cur_tile_pos].is_closed:\n return False\n\n # check ray from npc or player\n if npc_to_player_flag:\n if self.player.tile_pos == cur_tile_pos:\n return True\n # from player to npc\n elif cur_tile_pos in self.level_map.npc_map:\n return cur_tile_pos\n # ----------------------------------------------\n if max_x < max_y:\n if max_x < max_z:\n cur_voxel_pos.x += dx\n max_x += delta_x\n else:\n cur_voxel_pos.z += dz\n max_z += delta_z\n else:\n if max_y < max_z:\n cur_voxel_pos.y += dy\n max_y += delta_y\n else:\n cur_voxel_pos.z += dz\n max_z += delta_z\n return False"
},
{
"identifier": "LevelMap",
"path": "level_map.py",
"snippet": "class LevelMap:\n def __init__(self, eng, tmx_file='test.tmx'):\n self.eng = eng\n self.tiled_map = pytmx.TiledMap(f'resources/levels/{tmx_file}')\n self.gid_map = self.tiled_map.tiledgidmap\n\n self.width = self.tiled_map.width\n self.depth = self.tiled_map.height\n\n self.wall_map, self.floor_map, self.ceil_map = {}, {}, {}\n self.door_map, self.item_map, = {}, {}\n self.npc_map, self.npc_list = {}, []\n #\n self.parse_level()\n\n def get_id(self, gid):\n return self.gid_map[gid] - 1\n\n def parse_level(self):\n # get player pos\n player = self.tiled_map.get_layer_by_name('player').pop()\n player_pos = glm.vec3(player.x / TEX_SIZE, PLAYER_HEIGHT, player.y / TEX_SIZE)\n # set pos\n self.eng.player.position = player_pos\n\n walls = self.tiled_map.get_layer_by_name('walls')\n floors = self.tiled_map.get_layer_by_name('floors')\n ceilings = self.tiled_map.get_layer_by_name('ceilings')\n\n for ix in range(self.width):\n for iz in range(self.depth):\n if gid := walls.data[iz][ix]:\n # wall hash map\n self.wall_map[(ix, iz)] = self.get_id(gid)\n\n if gid := floors.data[iz][ix]:\n # floor hash map\n self.floor_map[(ix, iz)] = self.get_id(gid)\n\n if gid := ceilings.data[iz][ix]:\n # ceiling hash map\n self.ceil_map[(ix, iz)] = self.get_id(gid)\n\n # get doors\n door_objects = self.tiled_map.get_layer_by_name('doors')\n for obj in door_objects:\n # door hash map\n pos = int(obj.x / TEX_SIZE), int(obj.y / TEX_SIZE)\n door = Door(self, tex_id=self.get_id(obj.gid), x=pos[0], z=pos[1])\n self.door_map[pos] = door\n\n # get items\n items = self.tiled_map.get_layer_by_name('items')\n for obj in items:\n # item hash map\n pos = int(obj.x / TEX_SIZE), int(obj.y / TEX_SIZE)\n item = Item(self, tex_id=self.get_id(obj.gid), x=pos[0], z=pos[1])\n self.item_map[pos] = item\n\n # get npc\n npc = self.tiled_map.get_layer_by_name('npc')\n for obj in npc:\n # npc map\n pos = int(obj.x / TEX_SIZE), int(obj.y / TEX_SIZE)\n npc = NPC(self, tex_id=self.get_id(obj.gid), x=pos[0], z=pos[1])\n self.npc_map[pos] = npc\n self.npc_list.append(npc)\n\n # update player data\n self.eng.player.wall_map = self.wall_map\n self.eng.player.door_map = self.door_map\n self.eng.player.item_map = self.item_map"
},
{
"identifier": "Textures",
"path": "textures.py",
"snippet": "class Textures:\n def __init__(self, eng):\n self.eng = eng\n self.ctx = eng.ctx\n\n # build texture arrays\n TextureArrayBuilder(should_build=True)\n\n # load textures\n self.texture_array = self.load('texture_array/texture_array.png')\n\n # assign texture unit\n self.texture_array.use(location=TEXTURE_UNIT_0)\n\n def load(self, file_path):\n texture = pg.image.load(f'assets/{file_path}')\n texture = pg.transform.flip(texture, flip_x=True, flip_y=False)\n\n num_layers = texture.get_height() // texture.get_width()\n texture = self.eng.ctx.texture_array(\n size=(texture.get_width(), texture.get_height() // num_layers, num_layers),\n components=4,\n data=pg.image.tostring(texture, 'RGBA', False)\n )\n\n texture.anisotropy = 32.0\n texture.build_mipmaps()\n texture.filter = (mgl.NEAREST, mgl.NEAREST)\n return texture"
},
{
"identifier": "Sound",
"path": "sound.py",
"snippet": "class Sound:\n def __init__(self):\n pg.mixer.init()\n pg.mixer.set_num_channels(MAX_SOUND_CHANNELS)\n self.channel = 0\n self.path = 'assets/sounds/'\n #\n self.player_attack = {\n ID.KNIFE_0: self.load('w_knife.ogg', volume=0.2),\n ID.PISTOL_0: self.load('w_pistol.wav', volume=0.2),\n ID.RIFLE_0: self.load('w_rifle.ogg', volume=0.2)\n }\n #\n self.player_hurt = self.load('p_hurt.ogg')\n #\n self.player_death = self.load('p_death.ogg')\n #\n self.player_missed = self.load('p_missed.wav')\n #\n self.open_door = self.load('p_open_door.wav', volume=1.0)\n #\n self.pick_up = {\n ID.AMMO: self.load('p_ammo.ogg'),\n ID.MED_KIT: self.load('p_med_kit.mp3'),\n ID.KEY: self.load('p_key.wav'),\n }\n self.pick_up[ID.PISTOL_ICON] = self.pick_up[ID.AMMO]\n self.pick_up[ID.RIFLE_ICON] = self.pick_up[ID.AMMO]\n #\n self.enemy_attack = {\n ID.SOLDIER_BLUE_0: self.load('n_soldier_attack.mp3', volume=0.8),\n ID.SOLDIER_BROWN_0: self.load('n_soldier_attack.mp3', volume=0.8),\n ID.RAT_0: self.load('n_rat_attack.ogg', volume=0.2),\n }\n #\n self.spotted = {\n ID.SOLDIER_BLUE_0: self.load('n_soldier_spotted.ogg', volume=1.0),\n ID.SOLDIER_BROWN_0: self.load('n_brown_spotted.ogg', volume=0.8),\n ID.RAT_0: self.load('n_rat_spotted.ogg', volume=0.5),\n }\n #\n self.death = {\n ID.SOLDIER_BLUE_0: self.load('n_blue_death.ogg', volume=0.8),\n ID.SOLDIER_BROWN_0: self.load('n_brown_death.ogg', volume=0.8),\n ID.RAT_0: self.load('no_sound.mp3', volume=0.0),\n }\n #\n pg.mixer.music.load(self.path + 'theme.ogg')\n pg.mixer.music.set_volume(0.1)\n\n def load(self, file_name, volume=0.5):\n sound = pg.mixer.Sound(self.path + file_name)\n sound.set_volume(volume)\n return sound\n\n def play(self, sound):\n pg.mixer.Channel(self.channel).play(sound)\n self.channel += 1\n if self.channel == MAX_SOUND_CHANNELS:\n self.channel = 0"
}
] | from player import Player, PlayerAttribs
from scene import Scene
from shader_program import ShaderProgram
from path_finding import PathFinder
from ray_casting import RayCasting
from level_map import LevelMap
from textures import Textures
from sound import Sound
import pygame as pg | 5,796 |
class Engine:
def __init__(self, app):
self.app = app
self.ctx = app.ctx
self.num_level = 0
self.textures = Textures(self)
self.sound = Sound()
self.player_attribs = PlayerAttribs()
self.player: Player = None
|
class Engine:
def __init__(self, app):
self.app = app
self.ctx = app.ctx
self.num_level = 0
self.textures = Textures(self)
self.sound = Sound()
self.player_attribs = PlayerAttribs()
self.player: Player = None | self.shader_program: ShaderProgram = None | 3 | 2023-10-22 08:41:55+00:00 | 8k |
amazon-science/cceval | eval.py | [
{
"identifier": "compute_metric_stmt",
"path": "eval_metric.py",
"snippet": "def compute_metric_stmt(args):\n with open(f\"{args.output_dir}/prediction.jsonl\", \"r\") as f_pred:\n samples = []\n for l in f_pred.readlines():\n samples.append(json.loads(l))\n\n examples = {}\n with open(args.prompt_file, \"r\") as f_in:\n for l in f_in.readlines():\n ex = json.loads(l)\n examples[ex[\"metadata\"][\"task_id\"]] = {\n \"prompt\": ex[\"prompt\"],\n \"groundtruth\": ex[\"groundtruth\"]\n }\n\n assert len(samples) == len(examples), f\"{len(samples)} != {len(examples)}\"\n\n global parser\n ts_lang = \"c_sharp\" if args.language == \"csharp\" else args.language\n language = Language(args.ts_lib, ts_lang)\n parser = Parser()\n parser.set_language(language)\n\n truncated_samples = []\n em_labels = []\n\n print(\"post-processing samples ...\")\n pool = mp.Pool(mp.cpu_count() - 1)\n worker = partial(process_examples, args.language)\n\n with tqdm(total=len(samples)) as pbar:\n for output in pool.imap_unordered(worker, zip(samples, [examples[s[\"task_id\"]] for s in samples])):\n trunc_s, em_label = output\n em_labels.append(em_label)\n truncated_samples.append(trunc_s)\n pbar.update()\n\n exact_match = 0\n with open(f\"{args.output_dir}/prediction_truncated.jsonl\", 'w', encoding=\"utf-8\") as pt, \\\n open(f\"{args.output_dir}/exact_match_idx.jsonl\", 'w') as em:\n for trunc_s, em_label in zip(truncated_samples, em_labels):\n pt.write(json.dumps(trunc_s) + \"\\n\")\n if em_label == 1:\n em.write(f'{trunc_s[\"task_id\"]}\\n')\n exact_match += 1\n\n ### Score calculation\n\n id_em = []\n edit_similarities = []\n detailed_results = []\n\n for idx, trunc_s in enumerate(truncated_samples):\n identifier_em = int(trunc_s[\"pred_ids\"] == trunc_s[\"target_ids\"])\n es = cal_edit_sim([trunc_s[\"target\"]], [trunc_s[\"pred\"]])\n id_tp, id_fp, id_fn = compute_id_match(trunc_s[\"pred_ids\"], trunc_s[\"target_ids\"])\n id_em.append(identifier_em)\n edit_similarities.append(es)\n\n detailed_results.append({\n \"task_id\": trunc_s[\"task_id\"],\n \"em\": em_labels[idx],\n \"es\": es,\n \"id_em\": identifier_em,\n \"id_precision\": id_tp / (id_tp + id_fp) if (id_tp + id_fp) != 0 else 0,\n \"id_recall\": id_tp / (id_tp + id_fn) if (id_tp + id_fn) != 0 else 0,\n \"id_f1\": 2 * id_tp / (2 * id_tp + id_fp + id_fn) if (2 * id_tp + id_fp + id_fn) != 0 else 0,\n })\n\n em_ratio = round(exact_match / len(samples) * 100, 2)\n edit_sim = round(sum(edit_similarities) / len(edit_similarities), 2)\n\n id_em_ratio = round(\n sum(detailed_results[idx]['id_em'] for idx in range(len(detailed_results))) / len(detailed_results) * 100, 2)\n id_precision = round(sum(detailed_results[idx]['id_precision'] for idx in range(len(detailed_results))) / len(\n detailed_results) * 100, 2)\n id_recall = round(\n sum(detailed_results[idx]['id_recall'] for idx in range(len(detailed_results))) / len(detailed_results) * 100,\n 2)\n id_f1 = round(\n sum(detailed_results[idx]['id_f1'] for idx in range(len(detailed_results))) / len(detailed_results) * 100, 2)\n\n print(\n f\"Code Matching: \"\n f\"EM {em_ratio:.2f}, \"\n f\"ES {edit_sim:.2f}\"\n )\n\n print(\n f\"ID matching: \"\n f\"EM {id_em_ratio}, \"\n f\"Precision {id_precision}, \"\n f\"Recall {id_recall}, \"\n f\"F1 {id_f1}\"\n )\n\n with open(f\"{args.output_dir}/detailed_results.json\", 'w') as f:\n for dr in detailed_results:\n f.write(json.dumps(dr) + \"\\n\")\n\n # write the results to a file\n with open(f\"{args.output_dir}/results.json\", 'w') as f:\n res = {\n \"em\": em_ratio,\n \"es\": edit_sim,\n \"id_em\": id_em_ratio,\n \"id_precision\": id_precision,\n \"id_recall\": id_recall,\n \"total\": len(truncated_samples)\n }\n f.write(json.dumps(res, indent=2))"
},
{
"identifier": "compute_mean_logp",
"path": "eval_utils.py",
"snippet": "def compute_mean_logp(scores, sequences, pad_token_id):\n assert scores.shape[0] == sequences.shape[0]\n assert scores.shape[1] == sequences.shape[1]\n with torch.no_grad():\n logp_vocab = torch.nn.functional.log_softmax(scores, dim=-1)\n indices = torch.unsqueeze(sequences, dim=-1)\n logp = torch.gather(logp_vocab, dim=-1, index=indices).squeeze(-1)\n sum_logp = torch.cumsum(logp, dim=1) # batch_size, seq_len\n denom = torch.arange(1, sum_logp.shape[1] + 1).reshape(1, -1).to(device=sum_logp.device) # 1, seq_len\n mean_logp = (sum_logp / denom).tolist() # batch_size, seq_len\n sequence_lengths = (sequences != pad_token_id).sum(1).tolist() # batch_size\n mean_logp = [mean_logp[idx][l - 1] for idx, l in enumerate(sequence_lengths)]\n return mean_logp"
}
] | import argparse
import json
import logging
import os
import numpy as np
import torch
import custom_generate
from accelerate import Accelerator
from accelerate.utils import set_seed
from datasets import load_dataset
from torch.utils.data import DataLoader, SequentialSampler
from tqdm import tqdm
from transformers import (
AutoTokenizer,
AutoModelForCausalLM
)
from eval_metric import compute_metric_stmt
from eval_utils import compute_mean_logp | 4,319 | generated_texts = tokenizer.batch_decode(batch_pred, skip_special_tokens=True)
mean_logp = compute_mean_logp(batch_scores, batch_pred, tokenizer.pad_token_id)
return batch_task_id.tolist(), generated_texts, mean_logp
all_preds = []
all_task_ids = []
with torch.no_grad():
for idx, batch in tqdm(enumerate(dataloader), total=len(dataloader)):
completions = None
completion_scores = None
for seq_idx in range(args.num_return_sequences):
batch_task_id, generated_texts, mean_logp = generate_completions(batch)
if seq_idx == 0:
all_task_ids.extend(batch_task_id)
batch_size = len(batch_task_id)
completions = [[] for _ in range(batch_size)]
completion_scores = [[] for _ in range(batch_size)]
for j in range(batch_size):
completions[j].append(generated_texts[j])
completion_scores[j].append(mean_logp[j])
if args.num_return_sequences == 1:
all_preds.extend([c[0] for c in completions])
else:
for c, cs in zip(completions, completion_scores):
max_score = max(cs)
max_index = cs.index(max_score)
all_preds.append(c[max_index])
with open(f"{args.output_dir}/prediction.jsonl", "w", encoding="utf-8") as f_pred:
id_processed = set()
for idx, p in zip(all_task_ids, all_preds):
if index2taskid[idx] not in id_processed:
f_pred.write(json.dumps({"task_id": index2taskid[idx], "pred": p}) + "\n")
id_processed.add(index2taskid[idx])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# model inference args
parser.add_argument("--language", type=str, required=True, help="language name")
parser.add_argument("--model_name_or_path", default=None, type=str, help="Pre-trained Model Path")
parser.add_argument(
"--model_type",
type=str,
default="codelm",
choices=["codelm", "codelm_cfc"],
help="Model type to be loaded"
)
parser.add_argument("--prompt_file", type=str, default=None, help="file with a list of prompts")
parser.add_argument("--gen_length", type=int, default=50, help="max length of generated token sequence")
parser.add_argument("--max_seq_length", type=int, default=2048, help="max length of prompt")
parser.add_argument(
"--cfc_seq_length",
type=int,
default=512,
help="For model_type=codelm_cfc: Text sequence length corresponding to the retrieved nodes"
)
parser.add_argument(
"--min_cfc_score",
type=float,
default=float('-inf'),
help="For model_type=codelm_cfc: min score of a chunk to be considered as CFC chunk"
)
parser.add_argument("--batch_size", type=int, default=32, help="batch size for code completion")
parser.add_argument("--stop_token", type=str, default=None, help="Token at which text generation is stopped")
parser.add_argument("--cache_dir", type=str, default=None)
parser.add_argument(
"--temperature",
type=float,
default=0.2,
help="temperature of 1.0 has no effect, lower tend toward greedy sampling"
)
parser.add_argument("--output_dir", type=str, default="output_dir", help="output directory to save predictions")
parser.add_argument("--top_k", type=int, default=0)
parser.add_argument("--top_p", type=float, default=0.95)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument("--num_return_sequences", type=int, default=1, help="The number of samples to generate.")
parser.add_argument("--repetition_penalty", type=float, default=1.0, help="The parameter for repetition penalty.")
parser.add_argument(
"--preprocessing_num_workers",
type=int,
default=1,
help="The number of processes to use for the preprocessing."
)
parser.add_argument(
"--overwrite_cache",
type=bool,
default=False,
help="Overwrite the cached training and evaluation sets"
)
parser.add_argument("--dtype", type=str, default='bf16')
parser.add_argument("--do_sample", action="store_true", help="whether we do sampling or greedy/beam-search")
parser.add_argument("--num_beams", type=int, default=1, help="num of beam for beam-search")
# compute metric args
parser.add_argument(
"--ts_lib",
type=str,
default="build/python-lang-parser.so",
help="tree-sitter lib for tokenize code"
)
# only compute metric
parser.add_argument("--only_compute_metric", action="store_true", help="only compute metric")
args = parser.parse_args()
set_seed(args.seed, device_specific=False)
if args.num_return_sequences > 1:
assert args.do_sample, "sampling must be set to True when num_return_sequences > 1"
accelerator = Accelerator()
if not args.only_compute_metric:
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, trust_remote_code=True)
tokenized_datasets, index2taskid = build_datasets(args, tokenizer)
model_inference(tokenized_datasets, index2taskid, tokenizer)
# check if the process is the main process
if accelerator.is_main_process:
| # Copyright Amazon.com, Inc. or its affiliates. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
COMMENT_SYMBOL = {
"python": "#",
"java": "//",
"csharp": "//",
"typescript": "//"
}
def custom_data_collator(features):
first = features[0]
batch = {}
for k, v in first.items():
if v is not None and not isinstance(v, str):
if isinstance(v, torch.Tensor):
batch[k] = torch.stack([f[k] for f in features])
elif isinstance(v, np.ndarray):
batch[k] = torch.tensor(np.stack([f[k] for f in features]))
else:
batch[k] = torch.tensor([f[k] for f in features])
if v is not None and isinstance(v, str):
batch[k] = [f[k] for f in features]
return batch
def build_datasets(args, tokenizer):
# Initialize the model and tokenizer
# when generating, we will use the logits of right-most token to predict the next token
# so the padding should be on the left
tokenizer.padding_side = "left"
tokenizer.pad_token = tokenizer.eos_token if tokenizer.eos_token else tokenizer.bos_token
# load the files into Dataset
raw_datasets = load_dataset("json", data_files=args.prompt_file, cache_dir=args.cache_dir)
raw_datasets = raw_datasets["train"]
raw_datasets = raw_datasets.map(lambda example, idx: {'index': idx, **example}, with_indices=True)
index2taskid = {idx: md["task_id"] for idx, md in zip(raw_datasets["index"], raw_datasets["metadata"])}
column_names = raw_datasets.column_names
# Prompt composition
def prepare_features(examples):
tokenizer.truncation_side = "left"
tokenized_inputs = tokenizer(
examples["prompt"],
padding="max_length",
truncation=True,
max_length=args.max_seq_length - args.gen_length
)
features = {k: t for k, t in tokenized_inputs.items()}
features["index"] = examples["index"]
return features
def prepare_features_cfc(examples):
max_prompt_length = args.max_seq_length - args.gen_length
use_key = "list"
crossfile_context = []
if use_key == "text":
crossfile_context = [ex["text"] for ex in examples["crossfile_context"]]
else:
ls_sym = COMMENT_SYMBOL[args.language]
num_chunk_inc_prompt = []
augmented_prompt = 0
for cfc_chunks in examples["crossfile_context"]:
cfc_chunks = cfc_chunks["list"] # a list of dict
cfc_text = ""
if cfc_chunks:
# at least 1 relevant cfc_chunk found
init_cfc_text = f"{ls_sym} Here are some relevant code fragments from other files of the repo:\n\n"
cfc_length = len(tokenizer.tokenize(init_cfc_text))
num_chunk_inc = 0
for cfc_idx, cfc_chunk in enumerate(cfc_chunks):
if cfc_chunk["score"] > args.min_cfc_score:
add_text = f"{ls_sym} the below code fragment is found in {cfc_chunk['filename']}" + "\n"
cfc_lines = cfc_chunk["retrieved_chunk"].split('\n')
add_text += "\n".join([f"{ls_sym} {cl}" for cl in cfc_lines if cl]) + "\n\n"
# check if adding chunk exceeds max length budget for CFC
add_text_len = len(tokenizer.tokenize(add_text))
if cfc_length + add_text_len <= args.cfc_seq_length:
cfc_text += add_text
cfc_length += add_text_len
num_chunk_inc += 1
else:
break
num_chunk_inc_prompt.append(num_chunk_inc)
if num_chunk_inc > 0:
cfc_text = init_cfc_text + cfc_text
augmented_prompt += 1
crossfile_context.append(cfc_text)
logger.info(
f"{augmented_prompt} out of {len(examples['crossfile_context'])} prompts are augmented with cross-file context.")
tokenizer.truncation_side = "right"
crossfile_features = tokenizer(
crossfile_context,
truncation=True,
max_length=args.cfc_seq_length
)
features = {"input_ids": [], "attention_mask": []}
tokenizer.truncation_side = "left"
for idx, prompt in enumerate(examples["prompt"]):
allowed_prompt_length = max_prompt_length - len(crossfile_features["input_ids"][idx])
prompt_feats = tokenizer(
[prompt],
truncation=True,
max_length=allowed_prompt_length
)
for k, v in prompt_feats.items():
features[k].append(crossfile_features[k][idx] + prompt_feats[k][0])
# pad to max_seq_length
tokenizer.padding_side = "left"
features = tokenizer.pad(features, padding="max_length", max_length=args.max_seq_length - args.gen_length)
features["index"] = examples["index"]
return features
if args.model_type in ["codelm", "seq2seqlm"]:
tokenized_datasets = raw_datasets.map(
prepare_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on dataset",
)
elif args.model_type == "codelm_cfc":
tokenized_datasets = raw_datasets.map(
prepare_features_cfc,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on dataset",
)
else:
raise NotImplementedError("prepare feature functions not implemented for new model type")
return tokenized_datasets, index2taskid
def model_inference(tokenized_datasets, index2taskid, tokenizer):
if args.dtype == 'fp16':
dtype = torch.float16
elif args.dtype == 'fp32':
dtype = torch.float32
elif args.dtype == 'bf16':
dtype = torch.bfloat16
elif args.dtype == 'int8':
dtype = torch.int8
else:
assert False, f'{args.dtype=} not implemented'
if args.model_type in ["codelm", "codelm_cfc"]:
model = AutoModelForCausalLM.from_pretrained(
args.model_name_or_path,
torch_dtype=dtype,
trust_remote_code=True,
revision="main"
)
else:
raise ValueError("Unknown model type")
total_samples_cnt = len(tokenized_datasets)
logger.info(f"total samples: {total_samples_cnt}")
data_sampler = SequentialSampler(tokenized_datasets)
dataloader = DataLoader(
tokenized_datasets,
sampler=data_sampler,
collate_fn=custom_data_collator,
batch_size=args.batch_size
)
model = accelerator.prepare_model(model)
dataloader = accelerator.prepare_data_loader(dataloader)
if not os.path.isdir(args.output_dir):
os.mkdir(args.output_dir)
tokenizer.pad_token = tokenizer.eos_token if tokenizer.eos_token else tokenizer.bos_token
prompt_length = args.max_seq_length - args.gen_length
@torch.no_grad()
def generate_completions(batch):
output_dict = custom_generate.generate(
accelerator.unwrap_model(model),
input_ids=batch["input_ids"],
attention_mask=batch["attention_mask"],
max_length=args.max_seq_length,
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
do_sample=args.do_sample,
num_beams=args.num_beams,
num_return_sequences=1,
pad_token_id=tokenizer.pad_token_id,
return_dict_in_generate=True,
output_scores=True
)
batch_task_id = batch["index"]
batch_pred = accelerator.pad_across_processes(
output_dict.sequences, dim=1, pad_index=tokenizer.pad_token_id
)
scores = torch.stack(output_dict.scores, dim=1)
batch_scores = accelerator.pad_across_processes(
scores, dim=1, pad_index=tokenizer.pad_token_id
)
# batch_scores.shape = (batch_size x num_gpus x num_return_sequences, max_length)
batch_task_id, batch_pred, batch_scores = accelerator.gather((batch_task_id, batch_pred, batch_scores))
batch_pred = batch_pred[:, prompt_length:]
generated_texts = tokenizer.batch_decode(batch_pred, skip_special_tokens=True)
mean_logp = compute_mean_logp(batch_scores, batch_pred, tokenizer.pad_token_id)
return batch_task_id.tolist(), generated_texts, mean_logp
all_preds = []
all_task_ids = []
with torch.no_grad():
for idx, batch in tqdm(enumerate(dataloader), total=len(dataloader)):
completions = None
completion_scores = None
for seq_idx in range(args.num_return_sequences):
batch_task_id, generated_texts, mean_logp = generate_completions(batch)
if seq_idx == 0:
all_task_ids.extend(batch_task_id)
batch_size = len(batch_task_id)
completions = [[] for _ in range(batch_size)]
completion_scores = [[] for _ in range(batch_size)]
for j in range(batch_size):
completions[j].append(generated_texts[j])
completion_scores[j].append(mean_logp[j])
if args.num_return_sequences == 1:
all_preds.extend([c[0] for c in completions])
else:
for c, cs in zip(completions, completion_scores):
max_score = max(cs)
max_index = cs.index(max_score)
all_preds.append(c[max_index])
with open(f"{args.output_dir}/prediction.jsonl", "w", encoding="utf-8") as f_pred:
id_processed = set()
for idx, p in zip(all_task_ids, all_preds):
if index2taskid[idx] not in id_processed:
f_pred.write(json.dumps({"task_id": index2taskid[idx], "pred": p}) + "\n")
id_processed.add(index2taskid[idx])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# model inference args
parser.add_argument("--language", type=str, required=True, help="language name")
parser.add_argument("--model_name_or_path", default=None, type=str, help="Pre-trained Model Path")
parser.add_argument(
"--model_type",
type=str,
default="codelm",
choices=["codelm", "codelm_cfc"],
help="Model type to be loaded"
)
parser.add_argument("--prompt_file", type=str, default=None, help="file with a list of prompts")
parser.add_argument("--gen_length", type=int, default=50, help="max length of generated token sequence")
parser.add_argument("--max_seq_length", type=int, default=2048, help="max length of prompt")
parser.add_argument(
"--cfc_seq_length",
type=int,
default=512,
help="For model_type=codelm_cfc: Text sequence length corresponding to the retrieved nodes"
)
parser.add_argument(
"--min_cfc_score",
type=float,
default=float('-inf'),
help="For model_type=codelm_cfc: min score of a chunk to be considered as CFC chunk"
)
parser.add_argument("--batch_size", type=int, default=32, help="batch size for code completion")
parser.add_argument("--stop_token", type=str, default=None, help="Token at which text generation is stopped")
parser.add_argument("--cache_dir", type=str, default=None)
parser.add_argument(
"--temperature",
type=float,
default=0.2,
help="temperature of 1.0 has no effect, lower tend toward greedy sampling"
)
parser.add_argument("--output_dir", type=str, default="output_dir", help="output directory to save predictions")
parser.add_argument("--top_k", type=int, default=0)
parser.add_argument("--top_p", type=float, default=0.95)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument("--num_return_sequences", type=int, default=1, help="The number of samples to generate.")
parser.add_argument("--repetition_penalty", type=float, default=1.0, help="The parameter for repetition penalty.")
parser.add_argument(
"--preprocessing_num_workers",
type=int,
default=1,
help="The number of processes to use for the preprocessing."
)
parser.add_argument(
"--overwrite_cache",
type=bool,
default=False,
help="Overwrite the cached training and evaluation sets"
)
parser.add_argument("--dtype", type=str, default='bf16')
parser.add_argument("--do_sample", action="store_true", help="whether we do sampling or greedy/beam-search")
parser.add_argument("--num_beams", type=int, default=1, help="num of beam for beam-search")
# compute metric args
parser.add_argument(
"--ts_lib",
type=str,
default="build/python-lang-parser.so",
help="tree-sitter lib for tokenize code"
)
# only compute metric
parser.add_argument("--only_compute_metric", action="store_true", help="only compute metric")
args = parser.parse_args()
set_seed(args.seed, device_specific=False)
if args.num_return_sequences > 1:
assert args.do_sample, "sampling must be set to True when num_return_sequences > 1"
accelerator = Accelerator()
if not args.only_compute_metric:
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, trust_remote_code=True)
tokenized_datasets, index2taskid = build_datasets(args, tokenizer)
model_inference(tokenized_datasets, index2taskid, tokenizer)
# check if the process is the main process
if accelerator.is_main_process: | compute_metric_stmt(args) | 0 | 2023-10-16 04:23:03+00:00 | 8k |
uukuguy/multi_loras | multi_loras/slora/models/peft/lora_adapter.py | [
{
"identifier": "get_lora_config_json",
"path": "multi_loras/slora/mprophet/lora_config.py",
"snippet": "def get_lora_config_json(name):\n if \"alpaca-lora-7b\" in name:\n config = {\"base_model_name_or_path\": \"decapoda-research/llama-7b-hf\",\n \"bias\": \"none\",\n \"enable_lora\": None,\n \"fan_in_fan_out\": False,\n \"inference_mode\": True,\n \"lora_alpha\": 16,\n \"lora_dropout\": 0.05,\n \"merge_weights\": False,\n \"modules_to_save\": None,\n \"peft_type\": \"LORA\",\n \"r\": 16,\n \"target_modules\": [\n \"q_proj\",\n \"k_proj\",\n \"v_proj\",\n \"o_proj\"\n ],\n \"task_type\": \"CAUSAL_LM\"\n }\n elif \"bactrian-x-llama-7b-lora\" in name:\n config = {\n \"base_model_name_or_path\": \"decapoda-research/llama-7b-hf\",\n \"bias\": \"none\",\n \"fan_in_fan_out\": False,\n \"inference_mode\": True,\n \"init_lora_weights\": True,\n \"lora_alpha\": 16,\n \"lora_dropout\": 0.05,\n \"modules_to_save\": None,\n \"peft_type\": \"LORA\",\n \"r\": 64,\n \"target_modules\": [\n \"q_proj\",\n \"k_proj\",\n \"v_proj\",\n \"o_proj\"\n ],\n \"task_type\": \"CAUSAL_LM\"\n }\n elif \"dummy-lora-7b-rank-\" in name:\n config = {\"base_model_name_or_path\": \"huggyllama/llama-7b\",\n \"bias\": \"none\",\n \"enable_lora\": None,\n \"fan_in_fan_out\": False,\n \"inference_mode\": True,\n \"lora_alpha\": 16,\n \"lora_dropout\": 0.05,\n \"merge_weights\": False,\n \"modules_to_save\": None,\n \"peft_type\": \"LORA\",\n \"r\": int(re.search(r'rank-(\\d+)', name).group(1)),\n \"target_modules\": [\n \"q_proj\",\n \"k_proj\",\n \"v_proj\",\n \"o_proj\"\n ],\n \"task_type\": \"CAUSAL_LM\"\n }\n elif \"dummy-lora-13b-rank-\" in name:\n config = {\"base_model_name_or_path\": \"meta-llama/Llama-2-13b-hf\",\n \"bias\": \"none\",\n \"enable_lora\": None,\n \"fan_in_fan_out\": False,\n \"inference_mode\": True,\n \"lora_alpha\": 16,\n \"lora_dropout\": 0.1,\n \"merge_weights\": False,\n \"modules_to_save\": None,\n \"peft_type\": \"LORA\",\n \"r\": int(re.search(r'rank-(\\d+)', name).group(1)),\n \"target_modules\": [\n \"q_proj\",\n \"k_proj\",\n \"v_proj\",\n \"o_proj\"\n ],\n \"task_type\": \"CAUSAL_LM\"\n }\n else:\n raise Exception(f\"unrecognized: {name}\")\n return config"
},
{
"identifier": "load_hf_weights",
"path": "multi_loras/slora/models/peft/layer_weights/hf_load_utils.py",
"snippet": "def load_hf_weights(data_type, weight_dir, transformer_layer_list=None, swap=False, dummy=False):\n data_type = torch.float16 if data_type == 'fp16' else torch.float32\n if transformer_layer_list is not None:\n assert transformer_layer_list[0].data_type_ == data_type, \"type is not right\"\n\n if dummy:\n for layer in transformer_layer_list:\n layer.load_hf_weights(None, swap=swap, dummy=dummy)\n return\n\n use_safetensors = True\n files = os.listdir(weight_dir)\n candidate_files = list(filter(lambda x: x.endswith('.safetensors'), files))\n if len(candidate_files) == 0:\n use_safetensors = False\n candidate_files = list(filter(lambda x: x.endswith('.bin'), files))\n assert len(candidate_files) != 0, \"can only support pytorch tensor and safetensors format for weights.\"\n\n model_name = weight_dir.rstrip(\"/\").split(\"/\")[-1]\n # for file_ in tqdm(candidate_files, desc=f\"load {model_name}\"):\n for file_ in candidate_files:\n if use_safetensors:\n weights = safe_open(os.path.join(weight_dir, file_), 'pt', 'cpu')\n weights = {k: weights.get_tensor(k)\n for k in weights.keys()}\n else:\n weights = torch.load(os.path.join(weight_dir, file_), 'cpu')\n # for key, tensor in weights.items():\n # if \"layers.0\" in key:\n # print(key)\n # print(tensor.shape)\n\n if transformer_layer_list is not None:\n for layer in transformer_layer_list:\n layer.load_hf_weights(weights, swap=swap)\n del weights\n gc.collect()\n return"
},
{
"identifier": "LoraLayerWeight",
"path": "multi_loras/slora/models/peft/layer_weights/lora_layer_weight.py",
"snippet": "class LoraLayerWeight:\n\n def __init__(\n self,\n layer_num,\n tp_rank,\n world_size,\n lora_config,\n network_config,\n data_type=torch.float16,\n no_lora_swap=False,\n prefetch_stream=None\n ):\n self.layer_num_ = layer_num\n self.tp_rank_ = tp_rank\n self.world_size_ = world_size\n self.data_type_ = data_type\n self.lora_config = lora_config\n self.network_config = network_config\n\n # lora params\n self.q_lora_A = None\n self.q_lora_B = None\n self.k_lora_A = None\n self.k_lora_B = None\n self.v_lora_A = None\n self.v_lora_B = None\n\n self.prefetch_stream = prefetch_stream\n\n # debug\n self.no_lora_swap = no_lora_swap\n\n def load_to_torch(self, path):\n numpy_type = {\n \"fp32\": np.float32,\n \"fp16\": np.float16\n }[self.data_type_]\n torch_type = {\n \"fp32\": torch.float32,\n \"fp16\": torch.float16\n }[self.data_type_]\n return torch.from_numpy(np.fromfile(path, dtype=numpy_type)).to(torch_type)\n\n def load_dummy_weights(self, swap):\n n_embed = self.network_config[\"hidden_size\"]\n split_n_embed = n_embed // self.world_size_\n rank = self.lora_config[\"r\"]\n if not swap or self.no_lora_swap:\n self.q_lora_A = (\n torch.rand((rank, split_n_embed), dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2\n - 1\n ) * 1e-3\n self.q_lora_B = (\n torch.rand((split_n_embed, rank), dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2\n - 1\n ) * 1e-3\n self.k_lora_A = (\n torch.rand((rank, split_n_embed), dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2\n - 1\n ) * 1e-3\n self.k_lora_B = (\n torch.rand((split_n_embed, rank), dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2\n - 1\n ) * 1e-3\n self.v_lora_A = (\n torch.rand((rank, split_n_embed), dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2\n - 1\n ) * 1e-3\n self.v_lora_B = (\n torch.rand((split_n_embed, rank), dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2\n - 1\n ) * 1e-3\n self.o_lora_A = (\n torch.rand((rank, split_n_embed), dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2\n - 1\n ) * 1e-3\n self.o_lora_B = (\n torch.rand((split_n_embed, rank), dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2\n - 1\n ) * 1e-3\n else:\n self.q_lora_A_home = ((\n torch.rand((rank, split_n_embed), dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2\n - 1\n ) * 1e-3).to(\"cpu\")\n self.q_lora_A = None\n self.q_lora_B_home = ((\n torch.rand((split_n_embed, rank), dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2\n - 1\n ) * 1e-3).to(\"cpu\")\n self.q_lora_B = None\n self.k_lora_A_home = ((\n torch.rand((rank, split_n_embed), dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2\n - 1\n ) * 1e-3).to(\"cpu\")\n self.k_lora_A = None\n self.k_lora_B_home = ((\n torch.rand((split_n_embed, rank), dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2\n - 1\n ) * 1e-3).to(\"cpu\")\n self.k_lora_B = None\n self.v_lora_A_home = ((\n torch.rand((rank, split_n_embed), dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2\n - 1\n ) * 1e-3).to(\"cpu\")\n self.v_lora_A = None\n self.v_lora_B_home = ((\n torch.rand((split_n_embed, rank), dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2\n - 1\n ) * 1e-3).to(\"cpu\")\n self.v_lora_B = None\n self.o_lora_A_home = ((\n torch.rand((rank, split_n_embed), dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2\n - 1\n ) * 1e-3).to(\"cpu\")\n self.o_lora_A = None\n self.o_lora_B_home = ((\n torch.rand((split_n_embed, rank), dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2\n - 1\n ) * 1e-3).to(\"cpu\")\n self.o_lora_B = None\n\n num_head = self.network_config[\"num_attention_heads\"]\n self.w_combined_home = torch.concat([\n self.q_lora_A_home.T.reshape(rank, num_head, -1),\n self.k_lora_A_home.T.reshape(rank, num_head, -1),\n self.v_lora_A_home.T.reshape(rank, num_head, -1),\n self.o_lora_A_home.T.reshape(rank, num_head, -1),\n self.q_lora_B_home.T.reshape(rank, num_head, -1),\n self.k_lora_B_home.T.reshape(rank, num_head, -1),\n self.v_lora_B_home.T.reshape(rank, num_head, -1),\n self.o_lora_B_home.T.reshape(rank, num_head, -1)\n ]).pin_memory()\n self.w_combined_home = self.w_combined_home.reshape(2, 4 * rank, num_head, -1)\n self.w_combined = None\n return\n\n def load_hf_weights(self, weights, swap=False, dummy=False):\n if dummy:\n self.load_dummy_weights(swap)\n return\n\n if swap and not self.no_lora_swap:\n self.load_hf_weights_cpu(weights)\n return\n\n n_embed = self.network_config[\"hidden_size\"]\n split_n_embed = n_embed // self.world_size_\n\n prefix = list(weights.keys())[0]\n prefix = prefix[:prefix.find(\"layers\")] + f\"layers.{self.layer_num_}.self_attn\"\n tp_idx = (split_n_embed * self.tp_rank_, split_n_embed * (self.tp_rank_ + 1))\n\n # q_proj A, B\n if f\"{prefix}.q_proj.lora_A.weight\" in weights:\n self.q_lora_A = weights[f\"{prefix}.q_proj.lora_A.weight\"][:, tp_idx[0]:tp_idx[1]]\n self.q_lora_A = self.q_lora_A.transpose(0, 1).contiguous().to(self.data_type_)\n self.q_lora_A = self.q_lora_A.cuda()\n\n if f\"{prefix}.q_proj.lora_B.weight\" in weights:\n self.q_lora_B = weights[f\"{prefix}.q_proj.lora_B.weight\"][tp_idx[0]:tp_idx[1], :]\n self.q_lora_B = self.q_lora_B.transpose(0, 1).contiguous().to(self.data_type_)\n self.q_lora_B = self.q_lora_B.cuda()\n\n # k_proj A, B\n if f\"{prefix}.k_proj.lora_A.weight\" in weights:\n self.k_lora_A = weights[f\"{prefix}.k_proj.lora_A.weight\"][:, tp_idx[0]:tp_idx[1]]\n self.k_lora_A = self.k_lora_A.transpose(0, 1).contiguous().to(self.data_type_)\n self.k_lora_A = self.k_lora_A.cuda()\n\n if f\"{prefix}.k_proj.lora_B.weight\" in weights:\n self.k_lora_B = weights[f\"{prefix}.k_proj.lora_B.weight\"][tp_idx[0]:tp_idx[1], :]\n self.k_lora_B = self.k_lora_B.transpose(0, 1).contiguous().to(self.data_type_)\n self.k_lora_B = self.k_lora_B.cuda()\n\n # v_proj A, B\n if f\"{prefix}.v_proj.lora_A.weight\" in weights:\n self.v_lora_A = weights[f\"{prefix}.v_proj.lora_A.weight\"][:, tp_idx[0]:tp_idx[1]]\n self.v_lora_A = self.v_lora_A.transpose(0, 1).contiguous().to(self.data_type_)\n self.v_lora_A = self.v_lora_A.cuda()\n\n if f\"{prefix}.v_proj.lora_B.weight\" in weights:\n self.v_lora_B = weights[f\"{prefix}.v_proj.lora_B.weight\"][tp_idx[0]:tp_idx[1], :]\n self.v_lora_B = self.v_lora_B.transpose(0, 1).contiguous().to(self.data_type_)\n self.v_lora_B = self.v_lora_B.cuda()\n\n # o_proj A, B\n if f\"{prefix}.o_proj.lora_A.weight\" in weights:\n self.o_lora_A = weights[f\"{prefix}.o_proj.lora_A.weight\"][:, tp_idx[0]:tp_idx[1]]\n self.o_lora_A = self.o_lora_A.transpose(0, 1).contiguous().to(self.data_type_)\n self.o_lora_A = self.o_lora_A.cuda()\n\n if f\"{prefix}.o_proj.lora_B.weight\" in weights:\n self.o_lora_B = weights[f\"{prefix}.o_proj.lora_B.weight\"][tp_idx[0]:tp_idx[1], :]\n self.o_lora_B = self.o_lora_B.transpose(0, 1).contiguous().to(self.data_type_)\n self.o_lora_B = self.o_lora_B.cuda()\n\n return\n\n def load_hf_weights_cpu(self, weights):\n n_embed = self.network_config[\"hidden_size\"]\n split_n_embed = n_embed // self.world_size_\n\n prefix = list(weights.keys())[0]\n prefix = prefix[:prefix.find(\"layers\")] + f\"layers.{self.layer_num_}.self_attn\"\n tp_idx = (split_n_embed * self.tp_rank_, split_n_embed * (self.tp_rank_ + 1))\n\n # q_proj A, B\n if f\"{prefix}.q_proj.lora_A.weight\" in weights:\n self.q_lora_A_home = weights[f\"{prefix}.q_proj.lora_A.weight\"][:, tp_idx[0]:tp_idx[1]]\n self.q_lora_A_home = self.q_lora_A_home.transpose(0, 1).contiguous().to(self.data_type_).pin_memory()\n self.q_lora_A = None\n\n if f\"{prefix}.q_proj.lora_B.weight\" in weights:\n self.q_lora_B_home = weights[f\"{prefix}.q_proj.lora_B.weight\"][tp_idx[0]:tp_idx[1], :]\n self.q_lora_B_home = self.q_lora_B_home.transpose(0, 1).contiguous().to(self.data_type_).pin_memory()\n self.q_lora_B = None\n\n # k_proj A, B\n if f\"{prefix}.k_proj.lora_A.weight\" in weights:\n self.k_lora_A_home = weights[f\"{prefix}.k_proj.lora_A.weight\"][:, tp_idx[0]:tp_idx[1]]\n self.k_lora_A_home = self.k_lora_A_home.transpose(0, 1).contiguous().to(self.data_type_).pin_memory()\n self.k_lora_A = None\n\n if f\"{prefix}.k_proj.lora_B.weight\" in weights:\n self.k_lora_B_home = weights[f\"{prefix}.k_proj.lora_B.weight\"][tp_idx[0]:tp_idx[1], :]\n self.k_lora_B_home = self.k_lora_B_home.transpose(0, 1).contiguous().to(self.data_type_).pin_memory()\n self.k_lora_B = None\n\n # v_proj A, B\n if f\"{prefix}.v_proj.lora_A.weight\" in weights:\n self.v_lora_A_home = weights[f\"{prefix}.v_proj.lora_A.weight\"][:, tp_idx[0]:tp_idx[1]]\n self.v_lora_A_home = self.v_lora_A_home.transpose(0, 1).contiguous().to(self.data_type_).pin_memory()\n self.v_lora_A = None\n\n if f\"{prefix}.v_proj.lora_B.weight\" in weights:\n self.v_lora_B_home = weights[f\"{prefix}.v_proj.lora_B.weight\"][tp_idx[0]:tp_idx[1], :]\n self.v_lora_B_home = self.v_lora_B_home.transpose(0, 1).contiguous().to(self.data_type_).pin_memory()\n self.v_lora_B = None\n\n # o_proj A, B\n if f\"{prefix}.o_proj.lora_A.weight\" in weights:\n self.o_lora_A_home = weights[f\"{prefix}.o_proj.lora_A.weight\"][:, tp_idx[0]:tp_idx[1]]\n self.o_lora_A_home = self.o_lora_A_home.transpose(0, 1).contiguous().to(self.data_type_).pin_memory()\n self.o_lora_A = None\n\n if f\"{prefix}.o_proj.lora_B.weight\" in weights:\n self.o_lora_B_home = weights[f\"{prefix}.o_proj.lora_B.weight\"][tp_idx[0]:tp_idx[1], :]\n self.o_lora_B_home = self.o_lora_B_home.transpose(0, 1).contiguous().to(self.data_type_).pin_memory()\n self.o_lora_B = None\n\n rank = self.lora_config[\"r\"]\n num_head = self.network_config[\"num_attention_heads\"]\n self.w_combined_home = torch.concat([\n self.q_lora_A_home.T.reshape(rank, num_head, -1),\n self.k_lora_A_home.T.reshape(rank, num_head, -1),\n self.v_lora_A_home.T.reshape(rank, num_head, -1),\n self.o_lora_A_home.T.reshape(rank, num_head, -1),\n self.q_lora_B_home.T.reshape(rank, num_head, -1),\n self.k_lora_B_home.T.reshape(rank, num_head, -1),\n self.v_lora_B_home.T.reshape(rank, num_head, -1),\n self.o_lora_B_home.T.reshape(rank, num_head, -1)\n ]).pin_memory()\n self.w_combined_home = self.w_combined_home.reshape(2, 4 * rank, num_head, -1)\n self.w_combined = None\n\n return\n\n def load_to_gpu(self, prefetch=False, bmm=False):\n if not bmm:\n if self.w_combined is None:\n if prefetch:\n self.w_combined = self.w_combined_home.to(\"cuda\", non_blocking=True)\n else:\n self.w_combined = self.w_combined_home.to(\"cuda\", non_blocking=True)\n else:\n if self.q_lora_A is None:\n self.q_lora_A = self.q_lora_A_home.to(\"cuda\", non_blocking=True)\n self.q_lora_B = self.q_lora_B_home.to(\"cuda\", non_blocking=True)\n self.k_lora_A = self.k_lora_A_home.to(\"cuda\", non_blocking=True)\n self.k_lora_B = self.k_lora_B_home.to(\"cuda\", non_blocking=True)\n self.v_lora_A = self.v_lora_A_home.to(\"cuda\", non_blocking=True)\n self.v_lora_B = self.v_lora_B_home.to(\"cuda\", non_blocking=True)\n self.o_lora_A = self.o_lora_A_home.to(\"cuda\", non_blocking=True)\n self.o_lora_B = self.o_lora_B_home.to(\"cuda\", non_blocking=True)\n\n def offload_from_gpu(self):\n if self.no_lora_swap:\n return\n #assert self.q_lora_A is not None\n self.w_combined = None\n self.q_lora_A = None\n self.q_lora_B = None\n self.k_lora_A = None\n self.k_lora_B = None\n self.v_lora_A = None\n self.v_lora_B = None\n self.o_lora_A = None\n self.o_lora_B = None"
},
{
"identifier": "hf_load_config",
"path": "multi_loras/slora/utils/model_load.py",
"snippet": "def hf_load_config(weights_dir, mode=\"adapter\"):\n is_local = os.path.isdir(weights_dir)\n if not is_local:\n # Use file lock to prevent multiple processes from\n # downloading the same model weights at the same time.\n with get_lock(model_name_or_path=weights_dir):\n weights_dir = snapshot_download(weights_dir,\n allow_patterns=[\"*.bin\", \"*.json\"])\n config_name = \"adapter_config.json\" if mode == \"adapter\" else \"config.json\"\n with open(os.path.join(weights_dir, config_name), \"r\") as f:\n return json.load(f), weights_dir"
}
] | import re
import torch
import os
from ...mprophet.lora_config import get_lora_config_json
from .layer_weights.hf_load_utils import load_hf_weights
from .layer_weights.lora_layer_weight import LoraLayerWeight
from ...utils.model_load import hf_load_config | 5,710 |
def get_lora_config(lora_dir, dummy):
if dummy:
return get_lora_config_json(lora_dir), lora_dir
else:
lora_dir = re.sub(r'-(\d+)$', '', lora_dir)
|
def get_lora_config(lora_dir, dummy):
if dummy:
return get_lora_config_json(lora_dir), lora_dir
else:
lora_dir = re.sub(r'-(\d+)$', '', lora_dir) | return hf_load_config(lora_dir) | 3 | 2023-10-16 02:39:47+00:00 | 8k |
MobileLLM/AutoDroid | droidbot/device_state.py | [
{
"identifier": "md5",
"path": "droidbot/utils.py",
"snippet": "def md5(input_str):\n import hashlib\n return hashlib.md5(input_str.encode('utf-8')).hexdigest()"
},
{
"identifier": "TouchEvent",
"path": "droidbot/input_event.py",
"snippet": "class TouchEvent(UIEvent):\n \"\"\"\n a touch on screen\n \"\"\"\n\n def __init__(self, x=None, y=None, view=None, event_dict=None):\n super().__init__(view)\n self.event_type = KEY_TouchEvent\n self.x = x\n self.y = y\n self.view = view\n if event_dict is not None:\n self.__dict__.update(event_dict)\n\n @staticmethod\n def get_random_instance(device, app):\n x = random.uniform(0, device.get_width())\n y = random.uniform(0, device.get_height())\n return TouchEvent(x, y)\n\n def send(self, device):\n x, y = UIEvent.get_xy(x=self.x, y=self.y, view=self.view)\n device.view_long_touch(x=x, y=y, duration=200)\n return True\n\n def get_event_str(self, state):\n if self.view is not None:\n return f\"{self.__class__.__name__}({UIEvent.view_str(state, self.view)})\"\n elif self.x is not None and self.y is not None:\n return \"%s(state=%s, x=%s, y=%s)\" % (self.__class__.__name__, state.state_str, self.x, self.y)\n else:\n msg = \"Invalid %s!\" % self.__class__.__name__\n raise InvalidEventException(msg)\n\n def get_views(self):\n return [self.view] if self.view else []"
},
{
"identifier": "LongTouchEvent",
"path": "droidbot/input_event.py",
"snippet": "class LongTouchEvent(UIEvent):\n \"\"\"\n a long touch on screen\n \"\"\"\n\n def __init__(self, x=None, y=None, view=None, duration=2000, event_dict=None):\n super().__init__(view)\n self.event_type = KEY_LongTouchEvent\n self.x = x\n self.y = y\n self.view = view\n self.duration = duration\n if event_dict is not None:\n self.__dict__.update(event_dict)\n\n @staticmethod\n def get_random_instance(device, app):\n x = random.uniform(0, device.get_width())\n y = random.uniform(0, device.get_height())\n return LongTouchEvent(x, y)\n\n def send(self, device):\n x, y = UIEvent.get_xy(x=self.x, y=self.y, view=self.view)\n device.view_long_touch(x=x, y=y, duration=self.duration)\n return True\n\n def get_event_str(self, state):\n if self.view is not None:\n return f\"{self.__class__.__name__}({UIEvent.view_str(state, self.view)})\"\n elif self.x is not None and self.y is not None:\n return \"%s(state=%s, x=%s, y=%s)\" %\\\n (self.__class__.__name__, state.state_str, self.x, self.y)\n else:\n msg = \"Invalid %s!\" % self.__class__.__name__\n raise InvalidEventException(msg)\n\n def get_views(self):\n return [self.view] if self.view else []"
},
{
"identifier": "ScrollEvent",
"path": "droidbot/input_event.py",
"snippet": "class ScrollEvent(UIEvent):\n \"\"\"\n swipe gesture\n \"\"\"\n\n def __init__(self, x=None, y=None, view=None, direction=\"DOWN\", event_dict=None):\n super().__init__(view)\n self.event_type = KEY_ScrollEvent\n self.x = x\n self.y = y\n self.view = view\n self.direction = direction\n\n if event_dict is not None:\n self.__dict__.update(event_dict)\n\n @staticmethod\n def get_random_instance(device, app):\n x = random.uniform(0, device.get_width())\n y = random.uniform(0, device.get_height())\n direction = random.choice([\"UP\", \"DOWN\", \"LEFT\", \"RIGHT\"])\n return ScrollEvent(x, y, direction)\n\n def send(self, device):\n if self.view is not None:\n from .device_state import DeviceState\n width = DeviceState.get_view_width(view_dict=self.view)\n height = DeviceState.get_view_height(view_dict=self.view)\n else:\n width = device.get_width()\n height = device.get_height()\n\n x, y = UIEvent.get_xy(x=self.x, y=self.y, view=self.view)\n if not x or not y:\n # If no view and no coordinate specified, use the screen center coordinate\n x = width / 2\n y = height / 2\n\n start_x, start_y = x, y\n end_x, end_y = x, y\n duration = 500\n\n drag_length = 3/10\n # bias = 5/11\n\n # if self.direction == \"UP\":\n # start_y -= height * 2 / 5\n # end_y += height * 2 / 5\n # elif self.direction == \"DOWN\":\n # start_y += height * 2 / 5\n # end_y -= height * 2 / 5\n # elif self.direction == \"LEFT\":\n # start_x -= width * 2 / 5\n # end_x += width * 2 / 5\n # elif self.direction == \"RIGHT\":\n # start_x += width * 2 / 5\n # end_x -= width * 2 / 5\n\n if self.direction == \"UP\":\n start_y -= height * drag_length\n end_y += height * drag_length\n # do not drag from the center to avoid mis-touch\n # start_x += width * bias\n # end_x += width * bias\n # print(height, start_y, end_y, start_x, end_x, width)\n elif self.direction == \"DOWN\":\n start_y += height * drag_length\n end_y -= height * drag_length\n # do not drag from the center to avoid mis-touch\n # start_x += width * bias\n # end_x += width * bias\n # print(height, start_y, end_y)\n elif self.direction == \"LEFT\":\n start_x -= width * drag_length\n end_x += width * drag_length\n elif self.direction == \"RIGHT\":\n start_x += width * drag_length\n end_x -= width * drag_length\n '''\n this has been used for special case for calendar application. You can change 200 due to other special cases\n '''\n if abs(end_y - start_y) >= 200:\n device.view_drag((start_x, start_y), (end_x, end_y), duration)\n return True\n\n def get_event_str(self, state):\n if self.view is not None:\n return \\\n f\"{self.__class__.__name__}({UIEvent.view_str(state, self.view)}, direction={self.direction})\"\n elif self.x is not None and self.y is not None:\n return \"%s(state=%s, x=%s, y=%s, direction=%s)\" %\\\n (self.__class__.__name__, state.state_str, self.x, self.y, self.direction)\n else:\n return \"%s(state=%s, direction=%s)\" % \\\n (self.__class__.__name__, state.state_str, self.direction)\n\n def get_views(self):\n return [self.view] if self.view else []"
},
{
"identifier": "SetTextEvent",
"path": "droidbot/input_event.py",
"snippet": "class SetTextEvent(UIEvent):\n \"\"\"\n input text to target UI\n \"\"\"\n\n @staticmethod\n def get_random_instance(device, app):\n pass\n\n def __init__(self, x=None, y=None, view=None, text=None, event_dict=None):\n super().__init__(view)\n self.event_type = KEY_SetTextEvent\n self.x = x\n self.y = y\n self.view = view\n # if text is not None:\n # text = text.replace('\"', '')\n self.text = text\n if event_dict is not None:\n self.__dict__.update(event_dict)\n\n def send(self, device):\n x, y = UIEvent.get_xy(x=self.x, y=self.y, view=self.view)\n touch_event = TouchEvent(x=x, y=y)\n touch_event.send(device)\n device.view_set_text(self.text)\n return True\n\n def get_event_str(self, state):\n if self.view is not None:\n return f\"{self.__class__.__name__}({UIEvent.view_str(state, self.view)}, text={self.text})\"\n elif self.x is not None and self.y is not None:\n return \"%s(state=%s, x=%s, y=%s, text=%s)\" %\\\n (self.__class__.__name__, state.state_str, self.x, self.y, self.text)\n else:\n msg = \"Invalid %s!\" % self.__class__.__name__\n raise InvalidEventException(msg)\n\n def get_views(self):\n return [self.view] if self.view else []"
},
{
"identifier": "KeyEvent",
"path": "droidbot/input_event.py",
"snippet": "class KeyEvent(InputEvent):\n \"\"\"\n a key pressing event\n \"\"\"\n\n def __init__(self, name=None, event_dict=None):\n super().__init__()\n self.event_type = KEY_KeyEvent\n self.name = name\n if event_dict is not None:\n self.__dict__.update(event_dict)\n\n @staticmethod\n def get_random_instance(device, app):\n key_name = random.choice(POSSIBLE_KEYS)\n return KeyEvent(key_name)\n\n def send(self, device):\n device.key_press(self.name)\n return True\n\n def get_event_str(self, state):\n return \"%s(state=%s, name=%s)\" % (self.__class__.__name__, state.state_str, self.name)"
},
{
"identifier": "UIEvent",
"path": "droidbot/input_event.py",
"snippet": "class UIEvent(InputEvent):\n \"\"\"\n This class describes a UI event of app, such as touch, click, etc\n \"\"\"\n def __init__(self, view=None):\n super().__init__()\n self.view = view\n\n def send(self, device):\n raise NotImplementedError\n\n @staticmethod\n def get_random_instance(device, app):\n if not device.is_foreground(app):\n # if current app is in background, bring it to foreground\n component = app.get_package_name()\n if app.get_main_activity():\n component += \"/%s\" % app.get_main_activity()\n return IntentEvent(Intent(suffix=component))\n\n else:\n choices = {\n TouchEvent: 6,\n LongTouchEvent: 2,\n SwipeEvent: 2\n }\n event_type = utils.weighted_choice(choices)\n return event_type.get_random_instance(device, app)\n\n @staticmethod\n def get_xy(x, y, view):\n if x and y:\n return x, y\n if view:\n from .device_state import DeviceState\n return DeviceState.get_view_center(view_dict=view)\n return x, y\n\n @staticmethod\n def view_str(state, view):\n view_class = view['class'].split('.')[-1]\n view_text = view['text'].replace('\\n', '\\\\n') if 'text' in view and view['text'] else ''\n view_text = view_text[:10] if len(view_text) > 10 else view_text\n view_short_sig = f'{state.activity_short_name}/{view_class}-{view_text}'\n return f\"state={state.state_str}, view={view['view_str']}({view_short_sig})\""
}
] | import copy
import math
import os
import pdb
import tools
import hashlib
import networkx as nx
import numpy as np
import json
import json
import json
import hashlib
import shutil
import hashlib
import re
import matplotlib.pyplot as plt
import re
from .utils import md5
from .input_event import TouchEvent, LongTouchEvent, ScrollEvent, SetTextEvent, KeyEvent, UIEvent
from treelib import Tree
from datetime import datetime
from xmlrpc.client import ServerProxy
from xmlrpc.client import ServerProxy
from PIL import Image | 5,770 |
# for view_id in enabled_view_ids:
# if view_id in touch_exclude_view_ids:
# continue
# children = self.__safe_dict_get(self.views[view_id], 'children')
# if children and len(children) > 0:
# continue
# possible_events.append(TouchEvent(view=self.views[view_id]))
# For old Android navigation bars
# possible_events.append(KeyEvent(name="MENU"))
self.possible_events = possible_events
return [] + possible_events
def _get_self_ancestors_property(self, view, key, default=None):
all_views = [view] + [self.views[i] for i in self.get_all_ancestors(view)]
for v in all_views:
value = self.__safe_dict_get(v, key)
if value:
return value
return default
def _merge_text(self, view_text, content_description):
text = ''
if view_text:
view_text = view_text.replace('\n', ' ')
view_text = f'{view_text[:20]}...' if len(view_text) > 20 else view_text
text += view_text
text += ' '
if content_description:
content_description = content_description.replace('\n', ' ')
content_description = f'{content_description[:20]}...' if len(content_description) > 20 else content_description
text += content_description
return text
def _remove_view_ids(self, views):
removed_views = []
for view_desc in views:
view_desc_without_id = tools.get_view_without_id(view_desc)
removed_views.append(view_desc_without_id)
return removed_views
def get_described_actions_bk(self, prefix=''):
"""
Get a text description of current state
"""
# import pdb;pdb.set_trace()
enabled_view_ids = []
for view_dict in self.views:
# exclude navigation bar if exists
if self.__safe_dict_get(view_dict, 'visible') and \
self.__safe_dict_get(view_dict, 'resource_id') not in \
['android:id/navigationBarBackground',
'android:id/statusBarBackground']:
enabled_view_ids.append(view_dict['temp_id'])
text_frame = "<p id=@ class='&'>#</p>"
btn_frame = "<button id=@ class='&' checked=$>#</button>"
input_frame = "<input id=@ class='&' >#</input>"
scroll_down_frame = "<div id=@ class='scroller'>scroll down</div>"
scroll_up_frame = "<div id=@ class='scroller'>scroll up</div>"
view_descs = []
available_actions = []
for view_id in enabled_view_ids:
view = self.views[view_id]
clickable = self._get_self_ancestors_property(view, 'clickable')
scrollable = self.__safe_dict_get(view, 'scrollable')
checkable = self._get_self_ancestors_property(view, 'checkable')
long_clickable = self._get_self_ancestors_property(view, 'long_clickable')
editable = self.__safe_dict_get(view, 'editable')
actionable = clickable or scrollable or checkable or long_clickable or editable
checked = self.__safe_dict_get(view, 'checked', default=False)
selected = self.__safe_dict_get(view, 'selected', default=False)
content_description = self.__safe_dict_get(view, 'content_description', default='')
view_text = self.__safe_dict_get(view, 'text', default='')
view_class = self.__safe_dict_get(view, 'class').split('.')[-1]
if not content_description and not view_text and not scrollable: # actionable?
continue
# text = self._merge_text(view_text, content_description)
# view_status = ''
if editable:
# view_status += 'editable '
view_desc = input_frame.replace('@', str(len(view_descs))).replace('#', view_text)
if content_description:
view_desc = view_desc.replace('&', content_description)
else:
view_desc = view_desc.replace(" class='&'", "")
view_descs.append(view_desc)
available_actions.append(SetTextEvent(view=view, text='HelloWorld'))
elif (clickable or checkable or long_clickable):
view_desc = btn_frame.replace('@', str(len(view_descs))).replace('#', view_text).replace('$', str(checked or selected))
# import pdb;pdb.set_trace()
if content_description:
view_desc = view_desc.replace('&', content_description)
else:
view_desc = view_desc.replace(" class='&'", "")
view_descs.append(view_desc)
available_actions.append(TouchEvent(view=view))
elif scrollable:
view_descs.append(scroll_up_frame.replace('@', str(len(view_descs))))#.replace('&', view_class).replace('#', text))
available_actions.append(ScrollEvent(view=view, direction='UP'))
view_descs.append(scroll_down_frame.replace('@', str(len(view_descs))))#.replace('&', view_class).replace('#', text))
available_actions.append(ScrollEvent(view=view, direction='DOWN'))
else:
view_desc = text_frame.replace('@', str(len(view_descs))).replace('#', view_text)
if content_description:
view_desc = view_desc.replace('&', content_description)
else:
view_desc = view_desc.replace(" class='&'", "")
view_descs.append(view_desc)
available_actions.append(TouchEvent(view=view))
view_descs.append(f"<button id={len(view_descs)} class='ImageButton'>go back</button>")
|
class DeviceState(object):
"""
the state of the current device
"""
def __init__(self, device, views, foreground_activity, activity_stack, background_services,
tag=None, screenshot_path=None):
self.device = device
self.foreground_activity = foreground_activity
self.activity_stack = activity_stack if isinstance(activity_stack, list) else []
self.background_services = background_services
if tag is None:
tag = datetime.now().strftime("%Y-%m-%d_%H%M%S")
self.tag = tag
self.screenshot_path = screenshot_path
self.views = self.__parse_views(views)
self.bk_views = copy.deepcopy(self.views)
self.view_graph = self._build_view_graph()
# self._adjust_view_clickability()
self.view_tree = {}
self.__assemble_view_tree(self.view_tree, self.views)
self.__generate_view_strs()
self.state_str = self.__get_hashed_state_str()
self.structure_str = self.__get_content_free_state_str()
self.search_content = self.__get_search_content()
self.possible_events = None
self.width = device.get_width(refresh=True)
self.height = device.get_height(refresh=False)
self._save_important_view_ids()
@property
def activity_short_name(self):
return self.foreground_activity.split('.')[-1]
def _save_important_view_ids(self):
_, _, _, important_view_ids = self.get_described_actions(remove_time_and_ip=False)
ids_path = self.device.output_dir +'/states_view_ids'
if not os.path.exists(ids_path):
os.mkdir(ids_path)
# if not isinstance(current_state, str):
# current_state_str = current_state.state_str
# else:
# current_state_str = current_state
important_view_id_path = self.device.output_dir +'/states_view_ids/'+ self.state_str + '.txt'
f = open(important_view_id_path, 'w')
f.write(str(important_view_ids))
f.close()
def __get_hashed_state_str(self):
state, _, _, _ = self.get_described_actions(remove_time_and_ip=True)
hashed_string = tools.hash_string(state)
return hashed_string
def to_dict(self):
state = {'tag': self.tag,
'state_str': self.state_str,
'state_str_content_free': self.structure_str,
'foreground_activity': self.foreground_activity,
'activity_stack': self.activity_stack,
'background_services': self.background_services,
'width': self.width,
'height': self.height,
'views': self.views}
return state
def to_json(self):
return json.dumps(self.to_dict(), indent=2)
def __parse_views(self, raw_views):
views = []
if not raw_views or len(raw_views) == 0:
return views
for view_dict in raw_views:
# # Simplify resource_id
# resource_id = view_dict['resource_id']
# if resource_id is not None and ":" in resource_id:
# resource_id = resource_id[(resource_id.find(":") + 1):]
# view_dict['resource_id'] = resource_id
views.append(view_dict)
return views
def __assemble_view_tree(self, root_view, views):
if not len(self.view_tree): # bootstrap
self.view_tree = copy.deepcopy(views[0])
self.__assemble_view_tree(self.view_tree, views)
else:
children = list(enumerate(root_view["children"]))
if not len(children):
return
for i, j in children:
root_view["children"][i] = copy.deepcopy(self.views[j])
self.__assemble_view_tree(root_view["children"][i], views)
def __generate_view_strs(self):
for view_dict in self.views:
self.__get_view_str(view_dict)
# self.__get_view_structure(view_dict)
@staticmethod
def __calculate_depth(views):
root_view = None
for view in views:
if DeviceState.__safe_dict_get(view, 'parent') == -1:
root_view = view
break
DeviceState.__assign_depth(views, root_view, 0)
@staticmethod
def __assign_depth(views, view_dict, depth):
view_dict['depth'] = depth
for view_id in DeviceState.__safe_dict_get(view_dict, 'children', []):
DeviceState.__assign_depth(views, views[view_id], depth + 1)
def __get_state_str(self):
state_str_raw = self.__get_state_str_raw()
return md5(state_str_raw)
def __get_state_str_raw(self):
if self.device.humanoid is not None:
proxy = ServerProxy("http://%s/" % self.device.humanoid)
return proxy.render_view_tree(json.dumps({
"view_tree": self.view_tree,
"screen_res": [self.device.display_info["width"],
self.device.display_info["height"]]
}))
else:
view_signatures = set()
for view in self.views:
view_signature = DeviceState.__get_view_signature(view)
if view_signature:
view_signatures.add(view_signature)
return "%s{%s}" % (self.foreground_activity, ",".join(sorted(view_signatures)))
def __get_content_free_state_str(self):
if self.device.humanoid is not None:
proxy = ServerProxy("http://%s/" % self.device.humanoid)
state_str = proxy.render_content_free_view_tree(json.dumps({
"view_tree": self.view_tree,
"screen_res": [self.device.display_info["width"],
self.device.display_info["height"]]
}))
else:
view_signatures = set()
for view in self.views:
view_signature = DeviceState.__get_content_free_view_signature(view)
if view_signature:
view_signatures.add(view_signature)
state_str = "%s{%s}" % (self.foreground_activity, ",".join(sorted(view_signatures)))
return hashlib.md5(state_str.encode('utf-8')).hexdigest()
def __get_search_content(self):
"""
get a text for searching the state
:return: str
"""
words = [",".join(self.__get_property_from_all_views("resource_id")),
",".join(self.__get_property_from_all_views("text"))]
return "\n".join(words)
def __get_property_from_all_views(self, property_name):
"""
get the values of a property from all views
:return: a list of property values
"""
property_values = set()
for view in self.views:
property_value = DeviceState.__safe_dict_get(view, property_name, None)
if property_value:
property_values.add(property_value)
return property_values
def save2dir(self, output_dir=None):
try:
if output_dir is None:
if self.device.output_dir is None:
return
else:
output_dir = os.path.join(self.device.output_dir, "states")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
dest_state_json_path = "%s/state_%s.json" % (output_dir, self.tag)
if self.device.adapters[self.device.minicap]:
dest_screenshot_path = "%s/screen_%s.jpg" % (output_dir, self.tag)
else:
dest_screenshot_path = "%s/screen_%s.png" % (output_dir, self.tag)
state_json_file = open(dest_state_json_path, "w")
state_json_file.write(self.to_json())
state_json_file.close()
shutil.copyfile(self.screenshot_path, dest_screenshot_path)
self.screenshot_path = dest_screenshot_path
# from PIL.Image import Image
# if isinstance(self.screenshot_path, Image):
# self.screenshot_path.save(dest_screenshot_path)
except Exception as e:
self.device.logger.warning(e)
def save_view_img(self, view_dict, output_dir=None):
try:
if output_dir is None:
if self.device.output_dir is None:
return
else:
output_dir = os.path.join(self.device.output_dir, "views")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
view_str = view_dict['view_str']
if self.device.adapters[self.device.minicap]:
view_file_path = "%s/view_%s.jpg" % (output_dir, view_str)
else:
view_file_path = "%s/view_%s.png" % (output_dir, view_str)
if os.path.exists(view_file_path):
return
# Load the original image:
view_bound = view_dict['bounds']
original_img = Image.open(self.screenshot_path)
# view bound should be in original image bound
view_img = original_img.crop((min(original_img.width - 1, max(0, view_bound[0][0])),
min(original_img.height - 1, max(0, view_bound[0][1])),
min(original_img.width, max(0, view_bound[1][0])),
min(original_img.height, max(0, view_bound[1][1]))))
view_img.convert("RGB").save(view_file_path)
except Exception as e:
self.device.logger.warning(e)
def is_different_from(self, another_state):
"""
compare this state with another
@param another_state: DeviceState
@return: boolean, true if this state is different from other_state
"""
return self.state_str != another_state.state_str
@staticmethod
def __get_view_signature(view_dict):
"""
get the signature of the given view
@param view_dict: dict, an element of list DeviceState.views
@return:
"""
if 'signature' in view_dict:
return view_dict['signature']
view_text = DeviceState.__safe_dict_get(view_dict, 'text', "None")
if view_text is None or len(view_text) > 50:
view_text = "None"
signature = "[class]%s[resource_id]%s[text]%s[%s,%s,%s]" % \
(DeviceState.__safe_dict_get(view_dict, 'class', "None"),
DeviceState.__safe_dict_get(view_dict, 'resource_id', "None"),
view_text,
DeviceState.__key_if_true(view_dict, 'enabled'),
DeviceState.__key_if_true(view_dict, 'checked'),
DeviceState.__key_if_true(view_dict, 'selected'))
view_dict['signature'] = signature
return signature
@staticmethod
def __get_content_free_view_signature(view_dict):
"""
get the content-free signature of the given view
@param view_dict: dict, an element of list DeviceState.views
@return:
"""
if 'content_free_signature' in view_dict:
return view_dict['content_free_signature']
content_free_signature = "[class]%s[resource_id]%s" % \
(DeviceState.__safe_dict_get(view_dict, 'class', "None"),
DeviceState.__safe_dict_get(view_dict, 'resource_id', "None"))
view_dict['content_free_signature'] = content_free_signature
return content_free_signature
def __get_view_str(self, view_dict):
"""
get a string which can represent the given view
@param view_dict: dict, an element of list DeviceState.views
@return:
"""
if 'view_str' in view_dict:
return view_dict['view_str']
view_signature = DeviceState.__get_view_signature(view_dict)
parent_strs = []
for parent_id in self.get_all_ancestors(view_dict):
parent_strs.append(DeviceState.__get_view_signature(self.views[parent_id]))
parent_strs.reverse()
child_strs = []
for child_id in self.get_all_children(view_dict):
child_strs.append(DeviceState.__get_view_signature(self.views[child_id]))
child_strs.sort()
view_str = "Activity:%s\nSelf:%s\nParents:%s\nChildren:%s" % \
(self.foreground_activity, view_signature, "//".join(parent_strs), "||".join(child_strs))
view_str = hashlib.md5(view_str.encode('utf-8')).hexdigest()
view_dict['view_str'] = view_str
return view_str
def __get_view_structure(self, view_dict):
"""
get the structure of the given view
:param view_dict: dict, an element of list DeviceState.views
:return: dict, representing the view structure
"""
if 'view_structure' in view_dict:
return view_dict['view_structure']
width = DeviceState.get_view_width(view_dict)
height = DeviceState.get_view_height(view_dict)
class_name = DeviceState.__safe_dict_get(view_dict, 'class', "None")
children = {}
root_x = view_dict['bounds'][0][0]
root_y = view_dict['bounds'][0][1]
child_view_ids = self.__safe_dict_get(view_dict, 'children')
if child_view_ids:
for child_view_id in child_view_ids:
child_view = self.views[child_view_id]
child_x = child_view['bounds'][0][0]
child_y = child_view['bounds'][0][1]
relative_x, relative_y = child_x - root_x, child_y - root_y
children["(%d,%d)" % (relative_x, relative_y)] = self.__get_view_structure(child_view)
view_structure = {
"%s(%d*%d)" % (class_name, width, height): children
}
view_dict['view_structure'] = view_structure
return view_structure
@staticmethod
def __key_if_true(view_dict, key):
return key if (key in view_dict and view_dict[key]) else ""
@staticmethod
def __safe_dict_get(view_dict, key, default=None):
return_itm = view_dict[key] if (key in view_dict) else default
if return_itm == None:
return_itm = ''
return return_itm
@staticmethod
def get_view_center(view_dict):
"""
return the center point in a view
@param view_dict: dict, an element of DeviceState.views
@return: a pair of int
"""
bounds = view_dict['bounds']
return (bounds[0][0] + bounds[1][0]) / 2, (bounds[0][1] + bounds[1][1]) / 2
@staticmethod
def get_view_width(view_dict):
"""
return the width of a view
@param view_dict: dict, an element of DeviceState.views
@return: int
"""
bounds = view_dict['bounds']
return int(math.fabs(bounds[0][0] - bounds[1][0]))
@staticmethod
def get_view_height(view_dict):
"""
return the height of a view
@param view_dict: dict, an element of DeviceState.views
@return: int
"""
bounds = view_dict['bounds']
return int(math.fabs(bounds[0][1] - bounds[1][1]))
def get_all_ancestors(self, view_dict):
"""
Get temp view ids of the given view's ancestors
:param view_dict: dict, an element of DeviceState.views
:return: list of int, each int is an ancestor node id
"""
result = []
parent_id = self.__safe_dict_get(view_dict, 'parent', -1)
if 0 <= parent_id < len(self.views):
result.append(parent_id)
result += self.get_all_ancestors(self.views[parent_id])
return result
def get_all_children(self, view_dict):
"""
Get temp view ids of the given view's children
:param view_dict: dict, an element of DeviceState.views
:return: set of int, each int is a child node id
"""
children = self.__safe_dict_get(view_dict, 'children')
if not children:
return set()
children = set(children)
for child in children:
children_of_child = self.get_all_children(self.views[child])
children.union(children_of_child)
return children
def get_app_activity_depth(self, app):
"""
Get the depth of the app's activity in the activity stack
:param app: App
:return: the depth of app's activity, -1 for not found
"""
depth = 0
for activity_str in self.activity_stack:
if app.package_name in activity_str:
return depth
depth += 1
return -1
def get_possible_input(self):
"""
Get a list of possible input events for this state
:return: list of InputEvent
"""
if self.possible_events:
return [] + self.possible_events
possible_events = []
enabled_view_ids = []
touch_exclude_view_ids = set()
for view_dict in self.views:
# exclude navigation bar if exists
if self.__safe_dict_get(view_dict, 'enabled') and \
self.__safe_dict_get(view_dict, 'visible') and \
self.__safe_dict_get(view_dict, 'resource_id') not in \
['android:id/navigationBarBackground',
'android:id/statusBarBackground']:
enabled_view_ids.append(view_dict['temp_id'])
# enabled_view_ids.reverse()
for view_id in enabled_view_ids:
if self.__safe_dict_get(self.views[view_id], 'clickable'):
possible_events.append(TouchEvent(view=self.views[view_id]))
touch_exclude_view_ids.add(view_id)
touch_exclude_view_ids.union(self.get_all_children(self.views[view_id]))
for view_id in enabled_view_ids:
if self.__safe_dict_get(self.views[view_id], 'scrollable'):
possible_events.append(ScrollEvent(view=self.views[view_id], direction="UP"))
possible_events.append(ScrollEvent(view=self.views[view_id], direction="DOWN"))
possible_events.append(ScrollEvent(view=self.views[view_id], direction="LEFT"))
possible_events.append(ScrollEvent(view=self.views[view_id], direction="RIGHT"))
for view_id in enabled_view_ids:
if self.__safe_dict_get(self.views[view_id], 'checkable'):
possible_events.append(TouchEvent(view=self.views[view_id]))
touch_exclude_view_ids.add(view_id)
touch_exclude_view_ids.union(self.get_all_children(self.views[view_id]))
for view_id in enabled_view_ids:
if self.__safe_dict_get(self.views[view_id], 'long_clickable'):
possible_events.append(LongTouchEvent(view=self.views[view_id]))
for view_id in enabled_view_ids:
if self.__safe_dict_get(self.views[view_id], 'editable'):
possible_events.append(SetTextEvent(view=self.views[view_id], text="HelloWorld"))
touch_exclude_view_ids.add(view_id)
# TODO figure out what event can be sent to editable views
pass
# for view_id in enabled_view_ids:
# if view_id in touch_exclude_view_ids:
# continue
# children = self.__safe_dict_get(self.views[view_id], 'children')
# if children and len(children) > 0:
# continue
# possible_events.append(TouchEvent(view=self.views[view_id]))
# For old Android navigation bars
# possible_events.append(KeyEvent(name="MENU"))
self.possible_events = possible_events
return [] + possible_events
def _get_self_ancestors_property(self, view, key, default=None):
all_views = [view] + [self.views[i] for i in self.get_all_ancestors(view)]
for v in all_views:
value = self.__safe_dict_get(v, key)
if value:
return value
return default
def _merge_text(self, view_text, content_description):
text = ''
if view_text:
view_text = view_text.replace('\n', ' ')
view_text = f'{view_text[:20]}...' if len(view_text) > 20 else view_text
text += view_text
text += ' '
if content_description:
content_description = content_description.replace('\n', ' ')
content_description = f'{content_description[:20]}...' if len(content_description) > 20 else content_description
text += content_description
return text
def _remove_view_ids(self, views):
removed_views = []
for view_desc in views:
view_desc_without_id = tools.get_view_without_id(view_desc)
removed_views.append(view_desc_without_id)
return removed_views
def get_described_actions_bk(self, prefix=''):
"""
Get a text description of current state
"""
# import pdb;pdb.set_trace()
enabled_view_ids = []
for view_dict in self.views:
# exclude navigation bar if exists
if self.__safe_dict_get(view_dict, 'visible') and \
self.__safe_dict_get(view_dict, 'resource_id') not in \
['android:id/navigationBarBackground',
'android:id/statusBarBackground']:
enabled_view_ids.append(view_dict['temp_id'])
text_frame = "<p id=@ class='&'>#</p>"
btn_frame = "<button id=@ class='&' checked=$>#</button>"
input_frame = "<input id=@ class='&' >#</input>"
scroll_down_frame = "<div id=@ class='scroller'>scroll down</div>"
scroll_up_frame = "<div id=@ class='scroller'>scroll up</div>"
view_descs = []
available_actions = []
for view_id in enabled_view_ids:
view = self.views[view_id]
clickable = self._get_self_ancestors_property(view, 'clickable')
scrollable = self.__safe_dict_get(view, 'scrollable')
checkable = self._get_self_ancestors_property(view, 'checkable')
long_clickable = self._get_self_ancestors_property(view, 'long_clickable')
editable = self.__safe_dict_get(view, 'editable')
actionable = clickable or scrollable or checkable or long_clickable or editable
checked = self.__safe_dict_get(view, 'checked', default=False)
selected = self.__safe_dict_get(view, 'selected', default=False)
content_description = self.__safe_dict_get(view, 'content_description', default='')
view_text = self.__safe_dict_get(view, 'text', default='')
view_class = self.__safe_dict_get(view, 'class').split('.')[-1]
if not content_description and not view_text and not scrollable: # actionable?
continue
# text = self._merge_text(view_text, content_description)
# view_status = ''
if editable:
# view_status += 'editable '
view_desc = input_frame.replace('@', str(len(view_descs))).replace('#', view_text)
if content_description:
view_desc = view_desc.replace('&', content_description)
else:
view_desc = view_desc.replace(" class='&'", "")
view_descs.append(view_desc)
available_actions.append(SetTextEvent(view=view, text='HelloWorld'))
elif (clickable or checkable or long_clickable):
view_desc = btn_frame.replace('@', str(len(view_descs))).replace('#', view_text).replace('$', str(checked or selected))
# import pdb;pdb.set_trace()
if content_description:
view_desc = view_desc.replace('&', content_description)
else:
view_desc = view_desc.replace(" class='&'", "")
view_descs.append(view_desc)
available_actions.append(TouchEvent(view=view))
elif scrollable:
view_descs.append(scroll_up_frame.replace('@', str(len(view_descs))))#.replace('&', view_class).replace('#', text))
available_actions.append(ScrollEvent(view=view, direction='UP'))
view_descs.append(scroll_down_frame.replace('@', str(len(view_descs))))#.replace('&', view_class).replace('#', text))
available_actions.append(ScrollEvent(view=view, direction='DOWN'))
else:
view_desc = text_frame.replace('@', str(len(view_descs))).replace('#', view_text)
if content_description:
view_desc = view_desc.replace('&', content_description)
else:
view_desc = view_desc.replace(" class='&'", "")
view_descs.append(view_desc)
available_actions.append(TouchEvent(view=view))
view_descs.append(f"<button id={len(view_descs)} class='ImageButton'>go back</button>") | available_actions.append(KeyEvent(name='BACK')) | 5 | 2023-10-23 03:32:58+00:00 | 8k |
aws/res | tasks/tools/clean_tool.py | [
{
"identifier": "BuildTool",
"path": "tasks/tools/build_tool.py",
"snippet": "class BuildTool:\n \"\"\"\n IDEA Project Build Tool\n Handles building of individual projects under <PROJECT_ROOT>/source/idea/*\n\n Works based on standard idea directory structure:\n <PROJECT_ROOT>/\n + source/\n + idea/\n + <project-name>/\n + src/\n + <projectname>/\n + <projectname>_meta/\n + __init__.py\n + setup.py\n + resources/\n + config/\n + webapp?/\n + scripts/\n\n Build outputs will be available under:\n <PROJECT_ROOT>/\n + build/\n + <project-name>/\n \"\"\"\n\n def __init__(self, c: Context, app_name: str):\n self.c = c\n\n if app_name is None:\n raise idea.exceptions.invalid_params('app_name is required')\n\n app_dir = os.path.join(idea.props.project_source_dir, app_name)\n if not os.path.isdir(app_dir):\n raise idea.exceptions.invalid_params(f'project_dir: {app_dir} not found or does not exist')\n\n self.app_dir = app_dir\n self.release_version = idea.props.idea_release_version\n self._given_app_name = app_name\n self._app_name: Optional[str] = None\n\n @property\n def app_name(self) -> str:\n if self._app_name is not None:\n return self._app_name\n if self.has_src():\n self._app_name = idea.utils.get_package_meta(self.c, self.src_dir, 'name')\n return self._app_name\n else:\n return self._given_app_name\n\n @property\n def app_version(self) -> str:\n return idea.props.idea_release_version\n\n @property\n def output_dir(self) -> str:\n return os.path.join(idea.props.project_build_dir, self.output_archive_basename)\n\n @property\n def output_archive_basename(self) -> str:\n return self.app_name\n\n @property\n def output_archive_name(self) -> str:\n return f'{self.output_archive_basename}.tar.gz'\n\n @property\n def output_archive_file(self) -> str:\n return os.path.join(idea.props.project_build_dir, self.output_archive_name)\n\n @property\n def src_dir(self) -> str:\n return os.path.join(self.app_dir, 'src')\n\n def has_src(self) -> bool:\n return os.path.isdir(self.src_dir)\n\n @property\n def webapp_dir(self) -> str:\n return os.path.join(self.app_dir, 'webapp')\n\n @property\n def webapp_build_dir(self) -> str:\n return os.path.join(self.webapp_dir, 'build')\n\n def has_webapp(self) -> bool:\n return os.path.isdir(self.webapp_dir)\n\n @property\n def node_modules_dir(self) -> str:\n return os.path.join(self.webapp_dir, 'node_modules')\n\n def are_node_modules_installed(self) -> bool:\n return os.path.isdir(self.node_modules_dir)\n\n @property\n def resources_dir(self) -> str:\n return os.path.join(self.app_dir, 'resources')\n\n def has_resources(self) -> bool:\n return os.path.isdir(self.resources_dir)\n\n @property\n def install_dir(self) -> str:\n return os.path.join(self.app_dir, 'install')\n\n def has_install(self) -> bool:\n return os.path.isdir(self.install_dir)\n\n @property\n def config_dir(self) -> str:\n return os.path.join(self.app_dir, 'config')\n\n def has_config(self) -> bool:\n return os.path.isdir(self.config_dir)\n\n @property\n def bootstrap_dir(self) -> str:\n return os.path.join(idea.props.project_source_dir, 'idea-bootstrap')\n\n def find_app_meta_file(self) -> str:\n src_dir = self.src_dir\n files = os.listdir(src_dir)\n for file in files:\n if file.endswith('_meta'):\n return os.path.join(src_dir, file, '__init__.py')\n raise idea.exceptions.build_failed(f'could not find app meta file (__init__.py) in: {src_dir}')\n\n def clean(self):\n if self.has_src():\n src_dist = os.path.join(self.src_dir, 'dist')\n if os.path.isdir(src_dist):\n idea.console.print(f'deleting {src_dist} ...')\n shutil.rmtree(src_dist, ignore_errors=True)\n\n egg_name = self.app_name.replace('-', '_')\n egg_info_name = f'{egg_name}.egg-info'\n src_egg = os.path.join(self.src_dir, egg_info_name)\n if os.path.isdir(src_egg):\n idea.console.print(f'deleting {src_egg} ...')\n shutil.rmtree(os.path.join(self.src_dir, egg_info_name), ignore_errors=True)\n\n if self.has_webapp():\n skip_web = os.environ.get('IDEA_SKIP_WEB_BUILD', '0')\n if skip_web == '0':\n if os.path.isdir(self.webapp_build_dir):\n idea.console.print(f'deleting {self.webapp_build_dir} ...')\n shutil.rmtree(self.webapp_build_dir, ignore_errors=True)\n\n if os.path.isdir(self.output_dir):\n idea.console.print(f'deleting {self.output_dir} ...')\n shutil.rmtree(self.output_dir)\n\n if os.path.isfile(self.output_archive_file):\n idea.console.print(f'deleting {self.output_archive_file} ...')\n os.remove(self.output_archive_file)\n\n if self.app_name == 'idea-administrator':\n files = os.listdir(idea.props.deployment_administrator_dir)\n for file in files:\n if file == 'Dockerfile' or file == 'cfn_params_2_values.sh':\n continue\n file_path = os.path.join(idea.props.deployment_administrator_dir, file)\n if os.path.isfile(file_path):\n idea.console.print(f'deleting {file_path} ...')\n os.remove(os.path.join(idea.props.deployment_administrator_dir, file))\n elif os.path.isdir(file_path):\n idea.console.print(f'deleting {file_path} ...')\n shutil.rmtree(file_path)\n\n def pre_build_src(self):\n if not self.has_src():\n return\n PythonAppMetaFileUpdater(meta_file=self.find_app_meta_file()).update()\n\n def build_src(self):\n if not self.has_src():\n return\n with self.c.cd(self.src_dir):\n self.c.run(f'{idea.utils.idea_python} setup.py sdist')\n\n def pre_build_webapp(self):\n if not self.has_webapp():\n return\n webapp_dir = self.webapp_dir\n\n app_name = self.app_name\n app_version = self.app_version\n release_version = self.release_version\n\n NpmPackageJsonFileUpdater(\n package_json_file=os.path.join(webapp_dir, 'package.json'),\n app_name=app_name,\n app_version=app_version,\n release_version=release_version\n ).update()\n\n WebAppEnvFileUpdater(\n webapp_env_file=os.path.join(webapp_dir, '.env'),\n app_name=app_name,\n app_version=app_version,\n release_version=release_version\n ).update()\n\n def build_webapp(self):\n\n skip_web = os.environ.get('IDEA_SKIP_WEB_BUILD', '0')\n if skip_web == '1':\n return\n\n if not self.has_webapp():\n return\n\n with self.c.cd(self.webapp_dir):\n self.c.run('yarn install && yarn build')\n\n def copy_build_outputs(self):\n\n output_dir = self.output_dir\n shutil.rmtree(output_dir, ignore_errors=True)\n os.makedirs(output_dir, exist_ok=True)\n\n # src (sdist)\n if self.has_src():\n app_name = self.app_name\n # python does not accept server and does some funky normalization on the semver.\n # this is only applicable for pre-releases or dev branches. e.g. 3.0.0-dev.1 gets converted to 3.0.0.dev1\n normalized_python_app_version = idea.utils.get_package_meta(self.c, self.src_dir, 'version')\n sdist_name = f'{app_name}-{normalized_python_app_version}.tar.gz'\n sdist = os.path.join(self.src_dir, 'dist', sdist_name)\n shutil.copy(sdist, os.path.join(output_dir, f'{app_name}-lib.tar.gz'))\n\n # webapp\n if self.has_webapp():\n shutil.copytree(self.webapp_build_dir, os.path.join(output_dir, 'webapp'))\n\n # config\n if self.has_config():\n shutil.copytree(self.config_dir, os.path.join(output_dir, 'config'))\n\n # resources\n if self.has_resources():\n shutil.copytree(self.resources_dir, os.path.join(output_dir, 'resources'))\n shutil.copytree(self.bootstrap_dir, os.path.join(output_dir, 'resources', 'bootstrap'))\n\n def build(self):\n\n idea.console.print_header_block(f'build {self.app_name}')\n\n self.pre_build_src()\n self.build_src()\n self.pre_build_webapp()\n self.build_webapp()\n\n # copy build outputs to project build dir\n self.copy_build_outputs()"
},
{
"identifier": "PackageTool",
"path": "tasks/tools/package_tool.py",
"snippet": "class PackageTool:\n\n def __init__(self, c: Context, app_name: str, requirements_handler: Optional[Callable] = None):\n self.c = c\n self.requirements_handler = requirements_handler\n\n self.project_build_tool = BuildTool(c, app_name)\n self.data_model_build_tool: Optional[BuildTool] = None\n self.sdk_build_tool: Optional[BuildTool] = None\n\n if app_name not in {'idea-bootstrap', 'idea-dcv-connection-gateway'}:\n self.data_model_build_tool = BuildTool(c, 'idea-data-model')\n self.sdk_build_tool = BuildTool(c, 'idea-sdk')\n self.project_build_tool = BuildTool(c, app_name)\n\n @property\n def app_name(self) -> str:\n return self.project_build_tool.app_name\n\n @property\n def app_version(self) -> str:\n return self.project_build_tool.app_version\n\n @property\n def output_dir(self) -> str:\n return os.path.join(idea.props.project_dist_dir, self.output_archive_basename)\n\n @property\n def output_archive_basename(self) -> str:\n return f'{self.app_name}-{idea.props.idea_release_version}'\n\n @property\n def output_archive_name(self) -> str:\n return f'{self.output_archive_basename}.tar.gz'\n\n @property\n def output_archive_file(self) -> str:\n return os.path.join(idea.props.project_dist_dir, self.output_archive_name)\n\n def find_requirements_file(self) -> str:\n requirements_file = os.path.join(idea.props.requirements_dir, f'{self.app_name}.txt')\n if not os.path.isfile(requirements_file):\n raise idea.exceptions.build_failed(f'project requirements file not found: {requirements_file}')\n return requirements_file\n\n def clean(self):\n if os.path.isdir(self.output_dir):\n idea.console.print(f'deleting {self.output_dir} ...')\n shutil.rmtree(self.output_dir, ignore_errors=True)\n if os.path.isfile(self.output_archive_file):\n idea.console.print(f'deleting {self.output_archive_file} ...')\n os.remove(self.output_archive_file)\n\n def package(self, delete_output_dir=False):\n\n idea.console.print_header_block(f'package {self.app_name}')\n\n # create output dir\n output_dir = self.output_dir\n shutil.rmtree(output_dir, ignore_errors=True)\n os.makedirs(output_dir, exist_ok=True)\n\n if self.project_build_tool.has_src():\n # copy requirements\n idea.console.print(f'copying requirements.txt ...')\n if self.requirements_handler is not None:\n self.requirements_handler(self)\n else:\n shutil.copyfile(self.find_requirements_file(), os.path.join(output_dir, 'requirements.txt'))\n\n # copy sdk\n if self.sdk_build_tool is not None:\n self.sdk_build_tool.build()\n idea.console.print(f'copying sdk artifacts ...')\n for file in os.listdir(self.sdk_build_tool.output_dir):\n file_path = os.path.join(self.sdk_build_tool.output_dir, file)\n if os.path.isdir(file_path):\n shutil.copytree(file_path, os.path.join(output_dir, file))\n else:\n shutil.copy2(file_path, output_dir)\n\n # copy data-model\n if self.data_model_build_tool is not None:\n self.data_model_build_tool.build()\n idea.console.print(f'copying data-model artifacts ...')\n for file in os.listdir(self.data_model_build_tool.output_dir):\n file_path = os.path.join(self.data_model_build_tool.output_dir, file)\n if os.path.isdir(file_path):\n shutil.copytree(file_path, os.path.join(output_dir, file))\n else:\n shutil.copy2(file_path, output_dir)\n\n # copy project\n idea.console.print(f'copying {self.app_name} artifacts ...')\n for file in os.listdir(self.project_build_tool.output_dir):\n file_path = os.path.join(self.project_build_tool.output_dir, file)\n if os.path.isdir(file_path):\n shutil.copytree(file_path, os.path.join(output_dir, file))\n else:\n shutil.copy2(file_path, output_dir)\n\n # copy install scripts\n if self.project_build_tool.has_install():\n idea.console.print(f'copying {self.app_name} install scripts ...')\n install_dir = self.project_build_tool.install_dir\n files = os.listdir(install_dir)\n for file in files:\n shutil.copy2(os.path.join(install_dir, file), output_dir)\n\n idea.console.print(f'creating archive ...')\n shutil.make_archive(output_dir, 'gztar', output_dir)\n\n if delete_output_dir:\n shutil.rmtree(output_dir, ignore_errors=True)"
}
] | import os
import shutil
import tasks.idea as idea
from tasks.tools.build_tool import BuildTool
from tasks.tools.package_tool import PackageTool
from invoke import Context
from typing import Optional | 3,620 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions
# and limitations under the License.
class CleanTool:
def __init__(self, c: Context, app_name: str):
self.c = c
if app_name is None:
raise idea.exceptions.invalid_params('app_name is required')
self.app_name = app_name
self.build_tool: Optional[BuildTool] = None
| # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions
# and limitations under the License.
class CleanTool:
def __init__(self, c: Context, app_name: str):
self.c = c
if app_name is None:
raise idea.exceptions.invalid_params('app_name is required')
self.app_name = app_name
self.build_tool: Optional[BuildTool] = None | self.package_tool: Optional[PackageTool] = None | 1 | 2023-10-20 17:11:30+00:00 | 8k |
Agora-X/Bing-Chat-API | src/bing_chat/chathub.py | [
{
"identifier": "DELIMITER",
"path": "src/bing_chat/constants.py",
"snippet": "DELIMITER = \"\\x1e\""
},
{
"identifier": "HEADERS",
"path": "src/bing_chat/constants.py",
"snippet": "HEADERS = {\n \"accept\": \"application/json\",\n \"accept-language\": \"en-US;q=0.9\",\n \"accept-encoding\": \"gzip, deflate, br, zsdch\",\n \"content-type\": \"application/json\",\n \"sec-ch-ua\": '\"Microsoft Edge\";v=\"117\", \"Not;A=Brand\";v=\"8\", \"Chromium\";v=\"117\"',\n \"sec-ch-ua-arch\": '\"x86\"',\n \"sec-ch-ua-bitness\": '\"64\"',\n \"sec-ch-ua-full-version\": '\"117.0.2045.47\"',\n \"sec-ch-ua-full-version-list\": '\"Microsoft Edge\";v=\"117.0.2045.47\", \"Not;A=Brand\";v=\"8.0.0.0\", \"Chromium\";v=\"117.0.5938.132\"',\n \"sec-ch-ua-mobile\": \"?0\",\n \"sec-ch-ua-model\": \"\",\n \"sec-ch-ua-platform\": '\"Windows\"',\n \"sec-ch-ua-platform-version\": '\"15.0.0\"',\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-origin\",\n \"sec-ms-gec-version\": \"1-117.0.2045.47\",\n \"x-ms-client-request-id\": str(uuid.uuid4()),\n \"x-ms-useragent\": \"azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.3 OS/Windows\",\n \"Referer\": \"https://www.bing.com/search?\",\n \"Referrer-Policy\": \"origin-when-cross-origin\",\n \"x-forwarded-for\": FORWARDED_IP,\n}"
},
{
"identifier": "HEADERS_INIT_CONVER",
"path": "src/bing_chat/constants.py",
"snippet": "HEADERS_INIT_CONVER = {\n \"authority\": \"www.bing.com\",\n \"accept\": \"application/json\",\n \"accept-language\": \"en-US;q=0.9\",\n \"cache-control\": \"max-age=0\",\n \"sec-ch-ua\": '\"Microsoft Edge\";v=\"117\", \"Not;A=Brand\";v=\"8\", \"Chromium\";v=\"117\"',\n \"sec-ch-ua-arch\": '\"x86\"',\n \"sec-ch-ua-bitness\": '\"64\"',\n \"sec-ch-ua-full-version\": '\"117.0.2045.47\"',\n \"sec-ch-ua-full-version-list\": '\"Microsoft Edge\";v=\"117.0.2045.47\", \"Not;A=Brand\";v=\"8.0.0.0\", \"Chromium\";v=\"117.0.5938.132\"',\n \"sec-ch-ua-mobile\": \"?0\",\n \"sec-ch-ua-model\": '\"\"',\n \"sec-ch-ua-platform\": '\"Windows\"',\n \"sec-ch-ua-platform-version\": '\"15.0.0\"',\n \"upgrade-insecure-requests\": \"1\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36 Edg/117.0.2045.47\",\n \"x-edge-shopping-flag\": \"1\",\n \"x-forwarded-for\": FORWARDED_IP,\n}"
},
{
"identifier": "Conversation",
"path": "src/bing_chat/conversation.py",
"snippet": "class Conversation:\n def __init__(\n self,\n proxy: Union[str, None] = None,\n async_mode: bool = False,\n cookies: Union[List[dict], None] = None,\n ) -> None:\n if async_mode:\n return\n self.struct: dict = {\n \"conversationId\": None,\n \"clientId\": None,\n \"conversationSignature\": None,\n \"result\": {\"value\": \"Success\", \"message\": None},\n }\n self.proxy = proxy\n proxy = (\n proxy\n or os.environ.get(\"all_proxy\")\n or os.environ.get(\"ALL_PROXY\")\n or os.environ.get(\"https_proxy\")\n or os.environ.get(\"HTTPS_PROXY\")\n or None\n )\n if proxy is not None and proxy.startswith(\"socks5h://\"):\n proxy = \"socks5://\" + proxy[len(\"socks5h://\") :]\n self.session = httpx.Client(\n proxies=proxy,\n timeout=900,\n headers=HEADERS_INIT_CONVER,\n )\n if cookies:\n for cookie in cookies:\n self.session.cookies.set(cookie[\"name\"], cookie[\"value\"])\n # Send GET request\n response = self.session.get(\n url=os.environ.get(\"BING_PROXY_URL\")\n or \"https://edgeservices.bing.com/edgesvc/turing/conversation/create\",\n )\n if response.status_code != 200:\n print(f\"Status code: {response.status_code}\")\n print(response.text)\n print(response.url)\n raise Exception(\"Authentication failed\")\n try:\n self.struct = response.json()\n if self.struct.get(\"conversationSignature\") is None:\n self.struct[\"conversationSignature\"] = response.headers[\"X-Sydney-Conversationsignature\"]\n self.struct[\"encryptedConversationSignature\"] = response.headers[\"X-Sydney-Encryptedconversationsignature\"]\n except (json.decoder.JSONDecodeError, NotAllowedToAccess) as exc:\n raise Exception(\n \"Authentication failed. You have not been accepted into the beta.\",\n ) from exc\n if self.struct[\"result\"][\"value\"] == \"UnauthorizedRequest\":\n raise NotAllowedToAccess(self.struct[\"result\"][\"message\"])\n\n @staticmethod\n async def create(\n proxy: Union[str, None] = None,\n cookies: Union[List[dict], None] = None,\n ) -> \"Conversation\":\n self = Conversation(async_mode=True)\n self.struct = {\n \"conversationId\": None,\n \"clientId\": None,\n \"conversationSignature\": None,\n \"result\": {\"value\": \"Success\", \"message\": None},\n }\n self.proxy = proxy\n proxy = (\n proxy\n or os.environ.get(\"all_proxy\")\n or os.environ.get(\"ALL_PROXY\")\n or os.environ.get(\"https_proxy\")\n or os.environ.get(\"HTTPS_PROXY\")\n or None\n )\n if proxy is not None and proxy.startswith(\"socks5h://\"):\n proxy = \"socks5://\" + proxy[len(\"socks5h://\") :]\n transport = httpx.AsyncHTTPTransport(retries=900)\n # Convert cookie format to httpx format\n formatted_cookies = None\n if cookies:\n formatted_cookies = httpx.Cookies()\n for cookie in cookies:\n formatted_cookies.set(cookie[\"name\"], cookie[\"value\"])\n async with httpx.AsyncClient(\n proxies=proxy,\n timeout=30,\n headers=HEADERS_INIT_CONVER,\n transport=transport,\n cookies=formatted_cookies,\n ) as client:\n # Send GET request\n response = await client.get(\n url=os.environ.get(\"BING_PROXY_URL\")\n or \"https://www.bing.com/turing/conversation/create\",\n follow_redirects=True,\n )\n if response.status_code != 200:\n print(f\"Status code: {response.status_code}\")\n print(response.text)\n print(response.url)\n raise Exception(\"Authentication failed\")\n try:\n self.struct = response.json()\n if self.struct.get(\"conversationSignature\") is None:\n self.struct[\"conversationSignature\"] = response.headers[\"X-Sydney-Conversationsignature\"]\n self.struct[\"encryptedConversationSignature\"] = response.headers[\"X-Sydney-Encryptedconversationsignature\"]\n except (json.decoder.JSONDecodeError, NotAllowedToAccess) as exc:\n print(response.text)\n raise Exception(\n \"Authentication failed. You have not been accepted into the beta.\",\n ) from exc\n if self.struct[\"result\"][\"value\"] == \"UnauthorizedRequest\":\n raise NotAllowedToAccess(self.struct[\"result\"][\"message\"])\n return self"
},
{
"identifier": "CONVERSATION_STYLE_TYPE",
"path": "src/bing_chat/conversation_style.py",
"snippet": "CONVERSATION_STYLE_TYPE = Optional[\n Union[ConversationStyle, Literal[\"creative\", \"balanced\", \"precise\"]]\n]"
},
{
"identifier": "ChatHubRequest",
"path": "src/bing_chat/request.py",
"snippet": "class ChatHubRequest:\n def __init__(\n self,\n conversation_signature: str,\n encrypted_conversation_signature: str,\n client_id: str,\n conversation_id: str,\n invocation_id: int = 3,\n ) -> None:\n self.struct: dict = {}\n\n self.client_id: str = client_id\n self.conversation_id: str = conversation_id\n self.conversation_signature: str = conversation_signature\n self.encrypted_conversation_signature: str = encrypted_conversation_signature\n self.invocation_id: int = invocation_id\n\n def update(\n self,\n prompt: str,\n conversation_style: CONVERSATION_STYLE_TYPE,\n webpage_context: Union[str, None] = None,\n search_result: bool = False,\n locale: str = guess_locale(),\n ) -> None:\n options = [\n \"deepleo\",\n \"enable_debug_commands\",\n \"disable_emoji_spoken_text\",\n \"enablemm\",\n ]\n if conversation_style:\n if not isinstance(conversation_style, ConversationStyle):\n conversation_style = getattr(ConversationStyle, conversation_style)\n options = conversation_style.value\n message_id = str(uuid.uuid4())\n # Get the current local time\n now_local = datetime.now()\n\n # Get the current UTC time\n now_utc = datetime.utcnow()\n\n # Calculate the time difference between local and UTC time\n timezone_offset = now_local - now_utc\n\n # Get the offset in hours and minutes\n offset_hours = int(timezone_offset.total_seconds() // 3600)\n offset_minutes = int((timezone_offset.total_seconds() % 3600) // 60)\n\n # Format the offset as a string\n offset_string = f\"{offset_hours:+03d}:{offset_minutes:02d}\"\n\n # Get current time\n timestamp = datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\") + offset_string\n self.struct = {\n \"arguments\": [\n {\n \"source\": \"cib\",\n \"optionsSets\": options,\n \"allowedMessageTypes\": [\n \"ActionRequest\",\n \"Chat\",\n \"Context\",\n \"InternalSearchQuery\",\n \"InternalSearchResult\",\n \"Disengaged\",\n \"InternalLoaderMessage\",\n \"Progress\",\n \"RenderCardRequest\",\n \"AdsQuery\",\n \"SemanticSerp\",\n \"GenerateContentQuery\",\n \"SearchQuery\",\n ],\n \"sliceIds\": [\n \"winmuid1tf\",\n \"styleoff\",\n \"ccadesk\",\n \"smsrpsuppv4cf\",\n \"ssrrcache\",\n \"contansperf\",\n \"crchatrev\",\n \"winstmsg2tf\",\n \"creatgoglt\",\n \"creatorv2t\",\n \"sydconfigoptt\",\n \"adssqovroff\",\n \"530pstho\",\n \"517opinion\",\n \"418dhlth\",\n \"512sprtic1s0\",\n \"emsgpr\",\n \"525ptrcps0\",\n \"529rweas0\",\n \"515oscfing2s0\",\n \"524vidansgs0\",\n ],\n \"verbosity\": \"verbose\",\n \"traceId\": get_ran_hex(32),\n \"isStartOfSession\": self.invocation_id == 3,\n \"message\": {\n \"locale\": locale,\n \"market\": locale,\n \"region\": locale[-2:], # en-US -> US\n \"locationHints\": get_location_hint_from_locale(locale),\n \"timestamp\": timestamp,\n \"author\": \"user\",\n \"inputMethod\": \"Keyboard\",\n \"text\": prompt,\n \"messageType\": \"Chat\",\n \"messageId\": message_id,\n \"requestId\": message_id,\n },\n \"tone\": conversation_style.name.capitalize(), # Make first letter uppercase\n \"requestId\": message_id,\n \"conversationSignature\": self.conversation_signature,\n \"encryptedConversationSignature\": self.encrypted_conversation_signature,\n \"participant\": {\n \"id\": self.client_id,\n },\n \"conversationId\": self.conversation_id,\n },\n ],\n \"invocationId\": str(self.invocation_id),\n \"target\": \"chat\",\n \"type\": 4,\n }\n if search_result:\n have_search_result = [\n \"InternalSearchQuery\",\n \"InternalSearchResult\",\n \"InternalLoaderMessage\",\n \"RenderCardRequest\",\n ]\n self.struct[\"arguments\"][0][\"allowedMessageTypes\"] += have_search_result\n if webpage_context:\n self.struct[\"arguments\"][0][\"previousMessages\"] = [\n {\n \"author\": \"user\",\n \"description\": webpage_context,\n \"contextType\": \"WebPage\",\n \"messageType\": \"Context\",\n \"messageId\": \"discover-web--page-ping-mriduna-----\",\n },\n ]\n self.invocation_id += 1\n\n # print(timestamp)"
},
{
"identifier": "append_identifier",
"path": "src/bing_chat/utilities.py",
"snippet": "def append_identifier(msg: dict) -> str:\n # Convert dict to json string\n return json.dumps(msg, ensure_ascii=False) + DELIMITER"
},
{
"identifier": "get_ran_hex",
"path": "src/bing_chat/utilities.py",
"snippet": "def get_ran_hex(length: int = 32) -> str:\n return \"\".join(random.choice(\"0123456789abcdef\") for _ in range(length))"
},
{
"identifier": "guess_locale",
"path": "src/bing_chat/utilities.py",
"snippet": "def guess_locale() -> str:\n if sys.platform.startswith(\"win\"):\n return \"en-us\"\n loc, _ = locale.getlocale()\n return loc.replace(\"_\", \"-\") if loc else \"en-us\""
}
] | import asyncio
import json
import os
import ssl
import sys
import aiohttp
import certifi
import httpx
import urllib.parse
from time import time
from typing import Generator
from typing import List
from typing import Union
from BingImageCreator import ImageGenAsync
from .constants import DELIMITER
from .constants import HEADERS
from .constants import HEADERS_INIT_CONVER
from .conversation import Conversation
from .conversation_style import CONVERSATION_STYLE_TYPE
from .request import ChatHubRequest
from .utilities import append_identifier
from .utilities import get_ran_hex
from .utilities import guess_locale | 4,399 |
ssl_context = ssl.create_default_context()
ssl_context.load_verify_locations(certifi.where())
class ChatHub:
def __init__(
self,
conversation: Conversation,
proxy: str = None,
cookies: Union[List[dict], None] = None,
) -> None:
self.aio_session = None
self.request: ChatHubRequest
self.loop: bool
self.task: asyncio.Task
self.request = ChatHubRequest(
conversation_signature=conversation.struct["conversationSignature"],
encrypted_conversation_signature=conversation.struct["encryptedConversationSignature"],
client_id=conversation.struct["clientId"],
conversation_id=conversation.struct["conversationId"],
)
self.cookies = cookies
self.proxy: str = proxy
proxy = (
proxy
or os.environ.get("all_proxy")
or os.environ.get("ALL_PROXY")
or os.environ.get("https_proxy")
or os.environ.get("HTTPS_PROXY")
or None
)
if proxy is not None and proxy.startswith("socks5h://"):
proxy = "socks5://" + proxy[len("socks5h://") :]
self.session = httpx.AsyncClient(
proxies=proxy,
timeout=900,
headers=HEADERS_INIT_CONVER,
)
async def get_conversation(
self,
conversation_id: str = None,
conversation_signature: str = None,
encrypted_conversation_signature: str = None,
client_id: str = None,
) -> dict:
conversation_id = conversation_id or self.request.conversation_id
conversation_signature = (
conversation_signature or self.request.conversation_signature
)
encrypted_conversation_signature = (
encrypted_conversation_signature or self.request.encrypted_conversation_signature
)
client_id = client_id or self.request.client_id
url = f"https://sydney.bing.com/sydney/GetConversation?conversationId={conversation_id}&source=cib&participantId={client_id}&conversationSignature={conversation_signature}&encryptedConversationSignature={encrypted_conversation_signature}&traceId={get_ran_hex()}"
response = await self.session.get(url)
return response.json()
async def get_activity(self) -> dict:
url = "https://www.bing.com/turing/conversation/chats"
headers = HEADERS_INIT_CONVER.copy()
if self.cookies is not None:
for cookie in self.cookies:
if cookie["name"] == "_U":
headers["Cookie"] = f"SUID=A; _U={cookie['value']};"
break
response = await self.session.get(url, headers=headers)
return response.json()
async def ask_stream(
self,
prompt: str,
wss_link: str = None,
conversation_style: CONVERSATION_STYLE_TYPE = None,
raw: bool = False,
webpage_context: Union[str, None] = None,
search_result: bool = False,
locale: str = guess_locale(),
) -> Generator[bool, Union[dict, str], None]:
""" """
if self.request.encrypted_conversation_signature is not None:
wss_link = wss_link or "wss://sydney.bing.com/sydney/ChatHub"
wss_link += f"?sec_access_token={urllib.parse.quote(self.request.encrypted_conversation_signature)}"
cookies = {}
if self.cookies is not None:
for cookie in self.cookies:
cookies[cookie["name"]] = cookie["value"]
self.aio_session = aiohttp.ClientSession(cookies=cookies)
# Check if websocket is closed
wss = await self.aio_session.ws_connect(
wss_link or "wss://sydney.bing.com/sydney/ChatHub",
ssl=ssl_context,
headers=HEADERS,
proxy=self.proxy,
)
await self._initial_handshake(wss)
# Construct a ChatHub request
self.request.update(
prompt=prompt,
conversation_style=conversation_style,
webpage_context=webpage_context,
search_result=search_result,
locale=locale,
)
# Send request
await wss.send_str(append_identifier(self.request.struct))
draw = False
resp_txt = ""
result_text = ""
resp_txt_no_link = ""
retry_count = 5
while not wss.closed:
msg = await wss.receive_str()
if not msg:
retry_count -= 1
if retry_count == 0:
raise Exception("No response from server")
continue
if isinstance(msg, str):
|
ssl_context = ssl.create_default_context()
ssl_context.load_verify_locations(certifi.where())
class ChatHub:
def __init__(
self,
conversation: Conversation,
proxy: str = None,
cookies: Union[List[dict], None] = None,
) -> None:
self.aio_session = None
self.request: ChatHubRequest
self.loop: bool
self.task: asyncio.Task
self.request = ChatHubRequest(
conversation_signature=conversation.struct["conversationSignature"],
encrypted_conversation_signature=conversation.struct["encryptedConversationSignature"],
client_id=conversation.struct["clientId"],
conversation_id=conversation.struct["conversationId"],
)
self.cookies = cookies
self.proxy: str = proxy
proxy = (
proxy
or os.environ.get("all_proxy")
or os.environ.get("ALL_PROXY")
or os.environ.get("https_proxy")
or os.environ.get("HTTPS_PROXY")
or None
)
if proxy is not None and proxy.startswith("socks5h://"):
proxy = "socks5://" + proxy[len("socks5h://") :]
self.session = httpx.AsyncClient(
proxies=proxy,
timeout=900,
headers=HEADERS_INIT_CONVER,
)
async def get_conversation(
self,
conversation_id: str = None,
conversation_signature: str = None,
encrypted_conversation_signature: str = None,
client_id: str = None,
) -> dict:
conversation_id = conversation_id or self.request.conversation_id
conversation_signature = (
conversation_signature or self.request.conversation_signature
)
encrypted_conversation_signature = (
encrypted_conversation_signature or self.request.encrypted_conversation_signature
)
client_id = client_id or self.request.client_id
url = f"https://sydney.bing.com/sydney/GetConversation?conversationId={conversation_id}&source=cib&participantId={client_id}&conversationSignature={conversation_signature}&encryptedConversationSignature={encrypted_conversation_signature}&traceId={get_ran_hex()}"
response = await self.session.get(url)
return response.json()
async def get_activity(self) -> dict:
url = "https://www.bing.com/turing/conversation/chats"
headers = HEADERS_INIT_CONVER.copy()
if self.cookies is not None:
for cookie in self.cookies:
if cookie["name"] == "_U":
headers["Cookie"] = f"SUID=A; _U={cookie['value']};"
break
response = await self.session.get(url, headers=headers)
return response.json()
async def ask_stream(
self,
prompt: str,
wss_link: str = None,
conversation_style: CONVERSATION_STYLE_TYPE = None,
raw: bool = False,
webpage_context: Union[str, None] = None,
search_result: bool = False,
locale: str = guess_locale(),
) -> Generator[bool, Union[dict, str], None]:
""" """
if self.request.encrypted_conversation_signature is not None:
wss_link = wss_link or "wss://sydney.bing.com/sydney/ChatHub"
wss_link += f"?sec_access_token={urllib.parse.quote(self.request.encrypted_conversation_signature)}"
cookies = {}
if self.cookies is not None:
for cookie in self.cookies:
cookies[cookie["name"]] = cookie["value"]
self.aio_session = aiohttp.ClientSession(cookies=cookies)
# Check if websocket is closed
wss = await self.aio_session.ws_connect(
wss_link or "wss://sydney.bing.com/sydney/ChatHub",
ssl=ssl_context,
headers=HEADERS,
proxy=self.proxy,
)
await self._initial_handshake(wss)
# Construct a ChatHub request
self.request.update(
prompt=prompt,
conversation_style=conversation_style,
webpage_context=webpage_context,
search_result=search_result,
locale=locale,
)
# Send request
await wss.send_str(append_identifier(self.request.struct))
draw = False
resp_txt = ""
result_text = ""
resp_txt_no_link = ""
retry_count = 5
while not wss.closed:
msg = await wss.receive_str()
if not msg:
retry_count -= 1
if retry_count == 0:
raise Exception("No response from server")
continue
if isinstance(msg, str): | objects = msg.split(DELIMITER) | 0 | 2023-10-19 19:17:05+00:00 | 8k |
f0uriest/interpax | interpax/_spline.py | [
{
"identifier": "errorif",
"path": "interpax/utils.py",
"snippet": "def errorif(cond, err=ValueError, msg=\"\"):\n \"\"\"Raise an error if condition is met.\n\n Similar to assert but allows wider range of Error types, rather than\n just AssertionError.\n \"\"\"\n if cond:\n raise err(msg)"
},
{
"identifier": "isbool",
"path": "interpax/utils.py",
"snippet": "def isbool(x):\n \"\"\"Check if something is boolean or ndarray of bool type.\"\"\"\n return isinstance(x, bool) or (hasattr(x, \"dtype\") and (x.dtype == bool))"
}
] | from collections import OrderedDict
from functools import partial
from typing import Union
from jax import jit
from .utils import errorif, isbool
import equinox as eqx
import jax
import jax.numpy as jnp
import numpy as np | 3,888 | if fxyz is None:
fxyz = approx_df(z, fxy, method, 2, **kwargs)
assert (
fx.shape
== fy.shape
== fz.shape
== fxy.shape
== fxz.shape
== fyz.shape
== fxyz.shape
== f.shape
)
i = jnp.clip(jnp.searchsorted(x, xq, side="right"), 1, len(x) - 1)
j = jnp.clip(jnp.searchsorted(y, yq, side="right"), 1, len(y) - 1)
k = jnp.clip(jnp.searchsorted(z, zq, side="right"), 1, len(z) - 1)
dx = x[i] - x[i - 1]
deltax = xq - x[i - 1]
dxi = jnp.where(dx == 0, 0, 1 / dx)
tx = deltax * dxi
dy = y[j] - y[j - 1]
deltay = yq - y[j - 1]
dyi = jnp.where(dy == 0, 0, 1 / dy)
ty = deltay * dyi
dz = z[k] - z[k - 1]
deltaz = zq - z[k - 1]
dzi = jnp.where(dz == 0, 0, 1 / dz)
tz = deltaz * dzi
fs = OrderedDict()
fs["f"] = f
fs["fx"] = fx
fs["fy"] = fy
fs["fz"] = fz
fs["fxy"] = fxy
fs["fxz"] = fxz
fs["fyz"] = fyz
fs["fxyz"] = fxyz
fsq = OrderedDict()
for ff in fs.keys():
for kk in [0, 1]:
for jj in [0, 1]:
for ii in [0, 1]:
s = ff + str(ii) + str(jj) + str(kk)
fsq[s] = fs[ff][i - 1 + ii, j - 1 + jj, k - 1 + kk]
if "x" in ff:
fsq[s] = (dx * fsq[s].T).T
if "y" in ff:
fsq[s] = (dy * fsq[s].T).T
if "z" in ff:
fsq[s] = (dz * fsq[s].T).T
F = jnp.stack([foo for foo in fsq.values()], axis=0).T
coef = jnp.vectorize(jnp.matmul, signature="(n,n),(n)->(n)")(A_TRICUBIC, F).T
coef = jnp.moveaxis(coef.reshape((4, 4, 4, *coef.shape[1:]), order="F"), 3, 0)
ttx = _get_t_der(tx, derivative_x, dxi)
tty = _get_t_der(ty, derivative_y, dyi)
ttz = _get_t_der(tz, derivative_z, dzi)
fq = jnp.einsum("lijk...,li,lj,lk->l...", coef, ttx, tty, ttz)
fq = _extrap(xq, fq, x, lowx, highx)
fq = _extrap(yq, fq, y, lowy, highy)
fq = _extrap(zq, fq, z, lowz, highz)
return fq.reshape(outshape)
@partial(jit, static_argnames=("axis"))
def _make_periodic(xq: jax.Array, x: jax.Array, period: float, axis: int, *arrs):
"""Make arrays periodic along a specified axis."""
period = abs(period)
xq = xq % period
x = x % period
i = jnp.argsort(x)
x = x[i]
x = jnp.concatenate([x[-1:] - period, x, x[:1] + period])
arrs = list(arrs)
for k in range(len(arrs)):
if arrs[k] is not None:
arrs[k] = jnp.take(arrs[k], i, axis, mode="wrap")
arrs[k] = jnp.concatenate(
[
jnp.take(arrs[k], jnp.array([-1]), axis),
arrs[k],
jnp.take(arrs[k], jnp.array([0]), axis),
],
axis=axis,
)
return (xq, x, *arrs)
@jit
def _get_t_der(t: jax.Array, derivative: int, dxi: jax.Array):
"""Get arrays of [1,t,t^2,t^3] for cubic interpolation."""
t0 = jnp.zeros_like(t)
t1 = jnp.ones_like(t)
dxi = jnp.atleast_1d(dxi)[:, None]
# derivatives of monomials
d0 = lambda: jnp.array([t1, t, t**2, t**3]).T * dxi**0
d1 = lambda: jnp.array([t0, t1, 2 * t, 3 * t**2]).T * dxi
d2 = lambda: jnp.array([t0, t0, 2 * t1, 6 * t]).T * dxi**2
d3 = lambda: jnp.array([t0, t0, t0, 6 * t1]).T * dxi**3
d4 = lambda: jnp.array([t0, t0, t0, t0]).T * (dxi * 0)
return jax.lax.switch(derivative, [d0, d1, d2, d3, d4])
def _parse_ndarg(arg, n):
try:
k = len(arg)
except TypeError:
arg = tuple(arg for _ in range(n))
k = n
assert k == n, "got too many args"
return arg
def _parse_extrap(extrap, n):
| """Functions for interpolating splines that are JAX differentiable."""
CUBIC_METHODS = ("cubic", "cubic2", "cardinal", "catmull-rom")
OTHER_METHODS = ("nearest", "linear")
METHODS_1D = CUBIC_METHODS + OTHER_METHODS + ("monotonic", "monotonic-0")
METHODS_2D = CUBIC_METHODS + OTHER_METHODS
METHODS_3D = CUBIC_METHODS + OTHER_METHODS
class Interpolator1D(eqx.Module):
"""Convenience class for representing a 1D interpolated function.
Parameters
----------
x : ndarray, shape(Nx,)
coordinates of known function values ("knots")
f : ndarray, shape(Nx,...)
function values to interpolate
method : str
method of interpolation
- ``'nearest'``: nearest neighbor interpolation
- ``'linear'``: linear interpolation
- ``'cubic'``: C1 cubic splines (aka local splines)
- ``'cubic2'``: C2 cubic splines (aka natural splines)
- ``'catmull-rom'``: C1 cubic centripetal "tension" splines
- ``'cardinal'``: C1 cubic general tension splines. If used, can also pass
keyword parameter ``c`` in float[0,1] to specify tension
- ``'monotonic'``: C1 cubic splines that attempt to preserve monotonicity in the
data, and will not introduce new extrema in the interpolated points
- ``'monotonic-0'``: same as ``'monotonic'`` but with 0 first derivatives at
both endpoints
extrap : bool, float, array-like
whether to extrapolate values beyond knots (True) or return nan (False),
or a specified value to return for query points outside the bounds. Can
also be passed as a 2 element array or tuple to specify different conditions
for xq<x[0] and x[-1]<xq
period : float > 0, None
periodicity of the function. If given, function is assumed to be periodic
on the interval [0,period]. None denotes no periodicity
Notes
-----
This class is registered as a PyTree in JAX (it is actually an equinox.Module)
so should be compatible with standard JAX transformations (jit, grad, vmap, etc.)
"""
x: jax.Array
f: jax.Array
derivs: dict
method: str
extrap: Union[bool, float, tuple]
period: Union[None, float]
axis: int
def __init__(
self,
x: jax.Array,
f: jax.Array,
method: str = "cubic",
extrap: Union[bool, float, tuple] = False,
period: Union[None, float] = None,
**kwargs,
):
x, f = map(jnp.asarray, (x, f))
axis = kwargs.get("axis", 0)
fx = kwargs.pop("fx", None)
errorif(
(len(x) != f.shape[axis]) or (jnp.ndim(x) != 1),
ValueError,
"x and f must be arrays of equal length",
)
errorif(method not in METHODS_1D, ValueError, f"unknown method {method}")
self.x = x
self.f = f
self.axis = axis
self.method = method
self.extrap = extrap
self.period = period
if fx is None:
fx = approx_df(x, f, method, axis, **kwargs)
self.derivs = {"fx": fx}
def __call__(self, xq: jax.Array, dx: int = 0):
"""Evaluate the interpolated function or its derivatives.
Parameters
----------
xq : ndarray, shape(Nq,)
Query points where interpolation is desired
dx : int >= 0
Derivative to take.
Returns
-------
fq : ndarray, shape(Nq, ...)
Interpolated values.
"""
return interp1d(
xq,
self.x,
self.f,
self.method,
dx,
self.extrap,
self.period,
**self.derivs,
)
class Interpolator2D(eqx.Module):
"""Convenience class for representing a 2D interpolated function.
Parameters
----------
x : ndarray, shape(Nx,)
x coordinates of known function values ("knots")
y : ndarray, shape(Ny,)
y coordinates of known function values ("knots")
f : ndarray, shape(Nx,Ny,...)
function values to interpolate
method : str
method of interpolation
- ``'nearest'``: nearest neighbor interpolation
- ``'linear'``: linear interpolation
- ``'cubic'``: C1 cubic splines (aka local splines)
- ``'cubic2'``: C2 cubic splines (aka natural splines)
- ``'catmull-rom'``: C1 cubic centripetal "tension" splines
- ``'cardinal'``: C1 cubic general tension splines. If used, can also pass
keyword parameter ``c`` in float[0,1] to specify tension
extrap : bool, float, array-like
whether to extrapolate values beyond knots (True) or return nan (False),
or a specified value to return for query points outside the bounds. Can
also be passed as an array or tuple to specify different conditions
[[xlow, xhigh],[ylow,yhigh]]
period : float > 0, None, array-like, shape(2,)
periodicity of the function in x, y directions. None denotes no periodicity,
otherwise function is assumed to be periodic on the interval [0,period]. Use a
single value for the same in both directions.
Notes
-----
This class is registered as a PyTree in JAX (it is actually an equinox.Module)
so should be compatible with standard JAX transformations (jit, grad, vmap, etc.)
"""
x: jax.Array
y: jax.Array
f: jax.Array
derivs: dict
method: str
extrap: Union[bool, float, tuple]
period: Union[None, float, tuple]
axis: int
def __init__(
self,
x: jax.Array,
y: jax.Array,
f: jax.Array,
method: str = "cubic",
extrap: Union[bool, float, tuple] = False,
period: Union[None, float, tuple] = None,
**kwargs,
):
x, y, f = map(jnp.asarray, (x, y, f))
axis = kwargs.get("axis", 0)
fx = kwargs.pop("fx", None)
fy = kwargs.pop("fy", None)
fxy = kwargs.pop("fxy", None)
errorif(
(len(x) != f.shape[0]) or (x.ndim != 1),
ValueError,
"x and f must be arrays of equal length",
)
errorif(
(len(y) != f.shape[1]) or (y.ndim != 1),
ValueError,
"y and f must be arrays of equal length",
)
errorif(method not in METHODS_2D, ValueError, f"unknown method {method}")
self.x = x
self.y = y
self.f = f
self.axis = axis
self.method = method
self.extrap = extrap
self.period = period
if fx is None:
fx = approx_df(x, f, method, 0, **kwargs)
if fy is None:
fy = approx_df(y, f, method, 1, **kwargs)
if fxy is None:
fxy = approx_df(y, fx, method, 1, **kwargs)
self.derivs = {"fx": fx, "fy": fy, "fxy": fxy}
def __call__(self, xq: jax.Array, yq: jax.Array, dx: int = 0, dy: int = 0):
"""Evaluate the interpolated function or its derivatives.
Parameters
----------
xq, yq : ndarray, shape(Nq,)
x, y query points where interpolation is desired
dx, dy : int >= 0
Derivative to take in x, y directions.
Returns
-------
fq : ndarray, shape(Nq, ...)
Interpolated values.
"""
return interp2d(
xq,
yq,
self.x,
self.y,
self.f,
self.method,
(dx, dy),
self.extrap,
self.period,
**self.derivs,
)
class Interpolator3D(eqx.Module):
"""Convenience class for representing a 3D interpolated function.
Parameters
----------
x : ndarray, shape(Nx,)
x coordinates of known function values ("knots")
y : ndarray, shape(Ny,)
y coordinates of known function values ("knots")
z : ndarray, shape(Nz,)
z coordinates of known function values ("knots")
f : ndarray, shape(Nx,Ny,Nz,...)
function values to interpolate
method : str
method of interpolation
- ``'nearest'``: nearest neighbor interpolation
- ``'linear'``: linear interpolation
- ``'cubic'``: C1 cubic splines (aka local splines)
- ``'cubic2'``: C2 cubic splines (aka natural splines)
- ``'catmull-rom'``: C1 cubic centripetal "tension" splines
- ``'cardinal'``: C1 cubic general tension splines. If used, can also pass
keyword parameter ``c`` in float[0,1] to specify tension
extrap : bool, float, array-like
whether to extrapolate values beyond knots (True) or return nan (False),
or a specified value to return for query points outside the bounds. Can
also be passed as an array or tuple to specify different conditions
[[xlow, xhigh],[ylow,yhigh]]
period : float > 0, None, array-like, shape(2,)
periodicity of the function in x, y, z directions. None denotes no periodicity,
otherwise function is assumed to be periodic on the interval [0,period]. Use a
single value for the same in both directions.
Notes
-----
This class is registered as a PyTree in JAX (it is actually an equinox.Module)
so should be compatible with standard JAX transformations (jit, grad, vmap, etc.)
"""
x: jax.Array
y: jax.Array
z: jax.Array
f: jax.Array
derivs: dict
method: str
extrap: Union[bool, float, tuple]
period: Union[None, float, tuple]
axis: int
def __init__(
self,
x: jax.Array,
y: jax.Array,
z: jax.Array,
f: jax.Array,
method: str = "cubic",
extrap: Union[bool, float, tuple] = False,
period: Union[None, float, tuple] = None,
**kwargs,
):
x, y, z, f = map(jnp.asarray, (x, y, z, f))
axis = kwargs.get("axis", 0)
errorif(
(len(x) != f.shape[0]) or (x.ndim != 1),
ValueError,
"x and f must be arrays of equal length",
)
errorif(
(len(y) != f.shape[1]) or (y.ndim != 1),
ValueError,
"y and f must be arrays of equal length",
)
errorif(
(len(z) != f.shape[2]) or (z.ndim != 1),
ValueError,
"z and f must be arrays of equal length",
)
errorif(method not in METHODS_3D, ValueError, f"unknown method {method}")
fx = kwargs.pop("fx", None)
fy = kwargs.pop("fy", None)
fz = kwargs.pop("fz", None)
fxy = kwargs.pop("fxy", None)
fxz = kwargs.pop("fxz", None)
fyz = kwargs.pop("fyz", None)
fxyz = kwargs.pop("fxyz", None)
self.x = x
self.y = y
self.z = z
self.f = f
self.axis = axis
self.method = method
self.extrap = extrap
self.period = period
if fx is None:
fx = approx_df(x, f, method, 0, **kwargs)
if fy is None:
fy = approx_df(y, f, method, 1, **kwargs)
if fz is None:
fz = approx_df(z, f, method, 2, **kwargs)
if fxy is None:
fxy = approx_df(y, fx, method, 1, **kwargs)
if fxz is None:
fxz = approx_df(z, fx, method, 2, **kwargs)
if fyz is None:
fyz = approx_df(z, fy, method, 2, **kwargs)
if fxyz is None:
fxyz = approx_df(z, fxy, method, 2, **kwargs)
self.derivs = {
"fx": fx,
"fy": fy,
"fz": fz,
"fxy": fxy,
"fxz": fxz,
"fyz": fyz,
"fxyz": fxyz,
}
def __call__(
self,
xq: jax.Array,
yq: jax.Array,
zq: jax.Array,
dx: int = 0,
dy: int = 0,
dz: int = 0,
):
"""Evaluate the interpolated function or its derivatives.
Parameters
----------
xq, yq, zq : ndarray, shape(Nq,)
x, y, z query points where interpolation is desired
dx, dy, dz : int >= 0
Derivative to take in x, y, z directions.
Returns
-------
fq : ndarray, shape(Nq, ...)
Interpolated values.
"""
return interp3d(
xq,
yq,
zq,
self.x,
self.y,
self.z,
self.f,
self.method,
(dx, dy, dz),
self.extrap,
self.period,
**self.derivs,
)
@partial(jit, static_argnames="method")
def interp1d(
xq: jax.Array,
x: jax.Array,
f: jax.Array,
method: str = "cubic",
derivative: int = 0,
extrap: Union[bool, float, tuple] = False,
period: Union[None, float] = None,
**kwargs,
):
"""Interpolate a 1d function.
Parameters
----------
xq : ndarray, shape(Nq,)
query points where interpolation is desired
x : ndarray, shape(Nx,)
coordinates of known function values ("knots")
f : ndarray, shape(Nx,...)
function values to interpolate
method : str
method of interpolation
- ``'nearest'``: nearest neighbor interpolation
- ``'linear'``: linear interpolation
- ``'cubic'``: C1 cubic splines (aka local splines)
- ``'cubic2'``: C2 cubic splines (aka natural splines)
- ``'catmull-rom'``: C1 cubic centripetal "tension" splines
- ``'cardinal'``: C1 cubic general tension splines. If used, can also pass
keyword parameter ``c`` in float[0,1] to specify tension
- ``'monotonic'``: C1 cubic splines that attempt to preserve monotonicity in the
data, and will not introduce new extrema in the interpolated points
- ``'monotonic-0'``: same as ``'monotonic'`` but with 0 first derivatives at
both endpoints
derivative : int >= 0
derivative order to calculate
extrap : bool, float, array-like
whether to extrapolate values beyond knots (True) or return nan (False),
or a specified value to return for query points outside the bounds. Can
also be passed as a 2 element array or tuple to specify different conditions
for xq<x[0] and x[-1]<xq
period : float > 0, None
periodicity of the function. If given, function is assumed to be periodic
on the interval [0,period]. None denotes no periodicity
Returns
-------
fq : ndarray, shape(Nq,...)
function value at query points
Notes
-----
For repeated interpolation given the same x, f data, recommend using Interpolator1D
which caches the calculation of the derivatives and spline coefficients.
"""
xq, x, f = map(jnp.asarray, (xq, x, f))
axis = kwargs.get("axis", 0)
fx = kwargs.pop("fx", None)
outshape = xq.shape + f.shape[1:]
# Promote scalar query points to 1D array.
# Note this is done after the computation of outshape
# to make jax.grad work in the scalar case.
xq = jnp.atleast_1d(xq)
errorif(
(len(x) != f.shape[axis]) or (jnp.ndim(x) != 1),
ValueError,
"x and f must be arrays of equal length",
)
errorif(method not in METHODS_1D, ValueError, f"unknown method {method}")
lowx, highx = _parse_extrap(extrap, 1)
if period is not None:
xq, x, f, fx = _make_periodic(xq, x, period, axis, f, fx)
lowx = highx = True
if method == "nearest":
def derivative0():
i = jnp.argmin(jnp.abs(xq[:, np.newaxis] - x[np.newaxis]), axis=1)
return f[i]
def derivative1():
return jnp.zeros((xq.size, *f.shape[1:]))
fq = jax.lax.switch(derivative, [derivative0, derivative1])
elif method == "linear":
def derivative0():
i = jnp.clip(jnp.searchsorted(x, xq, side="right"), 1, len(x) - 1)
df = jnp.take(f, i, axis) - jnp.take(f, i - 1, axis)
dx = x[i] - x[i - 1]
dxi = jnp.where(dx == 0, 0, 1 / dx)
delta = xq - x[i - 1]
fq = jnp.where(
(dx == 0),
jnp.take(f, i, axis).T,
jnp.take(f, i - 1, axis).T + (delta * dxi * df.T),
).T
return fq
def derivative1():
i = jnp.clip(jnp.searchsorted(x, xq, side="right"), 1, len(x) - 1)
df = jnp.take(f, i, axis) - jnp.take(f, i - 1, axis)
dx = x[i] - x[i - 1]
dxi = jnp.where(dx == 0, 0, 1 / dx)
return (df.T * dxi).T
def derivative2():
return jnp.zeros((xq.size, *f.shape[1:]))
fq = jax.lax.switch(derivative, [derivative0, derivative1, derivative2])
elif method in (CUBIC_METHODS + ("monotonic", "monotonic-0")):
i = jnp.clip(jnp.searchsorted(x, xq, side="right"), 1, len(x) - 1)
if fx is None:
fx = approx_df(x, f, method, axis, **kwargs)
assert fx.shape == f.shape
dx = x[i] - x[i - 1]
delta = xq - x[i - 1]
dxi = jnp.where(dx == 0, 0, 1 / dx)
t = delta * dxi
f0 = jnp.take(f, i - 1, axis)
f1 = jnp.take(f, i, axis)
fx0 = (jnp.take(fx, i - 1, axis).T * dx).T
fx1 = (jnp.take(fx, i, axis).T * dx).T
F = jnp.stack([f0, f1, fx0, fx1], axis=0).T
coef = jnp.vectorize(jnp.matmul, signature="(n,n),(n)->(n)")(A_CUBIC, F).T
ttx = _get_t_der(t, derivative, dxi)
fq = jnp.einsum("ji...,ij->i...", coef, ttx)
fq = _extrap(xq, fq, x, lowx, highx)
return fq.reshape(outshape)
@partial(jit, static_argnames="method")
def interp2d( # noqa: C901 - FIXME: break this up into simpler pieces
xq: jax.Array,
yq: jax.Array,
x: jax.Array,
y: jax.Array,
f: jax.Array,
method: str = "cubic",
derivative: int = 0,
extrap: Union[bool, float, tuple] = False,
period: Union[None, float, tuple] = None,
**kwargs,
):
"""Interpolate a 2d function.
Parameters
----------
xq : ndarray, shape(Nq,)
x query points where interpolation is desired
yq : ndarray, shape(Nq,)
y query points where interpolation is desired
x : ndarray, shape(Nx,)
x coordinates of known function values ("knots")
y : ndarray, shape(Ny,)
y coordinates of known function values ("knots")
f : ndarray, shape(Nx,Ny,...)
function values to interpolate
method : str
method of interpolation
- ``'nearest'``: nearest neighbor interpolation
- ``'linear'``: linear interpolation
- ``'cubic'``: C1 cubic splines (aka local splines)
- ``'cubic2'``: C2 cubic splines (aka natural splines)
- ``'catmull-rom'``: C1 cubic centripetal "tension" splines
- ``'cardinal'``: C1 cubic general tension splines. If used, can also pass
keyword parameter ``c`` in float[0,1] to specify tension
derivative : int >= 0 or array-like, shape(2,)
derivative order to calculate in x, y. Use a single value for the same in both
directions.
extrap : bool, float, array-like
whether to extrapolate values beyond knots (True) or return nan (False),
or a specified value to return for query points outside the bounds. Can
also be passed as an array or tuple to specify different conditions
[[xlow, xhigh],[ylow,yhigh]]
period : float > 0, None, array-like, shape(2,)
periodicity of the function in x, y directions. None denotes no periodicity,
otherwise function is assumed to be periodic on the interval [0,period]. Use a
single value for the same in both directions.
Returns
-------
fq : ndarray, shape(Nq,...)
function value at query points
Notes
-----
For repeated interpolation given the same x, y, f data, recommend using
Interpolator2D which caches the calculation of the derivatives and spline
coefficients.
"""
xq, yq, x, y, f = map(jnp.asarray, (xq, yq, x, y, f))
fx = kwargs.pop("fx", None)
fy = kwargs.pop("fy", None)
fxy = kwargs.pop("fxy", None)
xq, yq = jnp.broadcast_arrays(xq, yq)
outshape = xq.shape + f.shape[2:]
# Promote scalar query points to 1D array.
# Note this is done after the computation of outshape
# to make jax.grad work in the scalar case.
xq, yq = map(jnp.atleast_1d, (xq, yq))
errorif(
(len(x) != f.shape[0]) or (x.ndim != 1),
ValueError,
"x and f must be arrays of equal length",
)
errorif(
(len(y) != f.shape[1]) or (y.ndim != 1),
ValueError,
"y and f must be arrays of equal length",
)
errorif(method not in METHODS_2D, ValueError, f"unknown method {method}")
periodx, periody = _parse_ndarg(period, 2)
derivative_x, derivative_y = _parse_ndarg(derivative, 2)
lowx, highx, lowy, highy = _parse_extrap(extrap, 2)
if periodx is not None:
xq, x, f, fx, fy, fxy = _make_periodic(xq, x, periodx, 0, f, fx, fy, fxy)
lowx = highx = True
if periody is not None:
yq, y, f, fx, fy, fxy = _make_periodic(yq, y, periody, 1, f, fx, fy, fxy)
lowy = highy = True
if method == "nearest":
def derivative0():
# because of the regular spaced grid we know that the nearest point
# will be one of the 4 neighbors on the grid, so we first find those
# and then take the nearest one among them.
i = jnp.clip(jnp.searchsorted(x, xq, side="right"), 1, len(x) - 1)
j = jnp.clip(jnp.searchsorted(y, yq, side="right"), 1, len(y) - 1)
neighbors_x = jnp.array(
[[x[i], x[i - 1], x[i], x[i - 1]], [y[j], y[j], y[j - 1], y[j - 1]]]
)
neighbors_f = jnp.array(
[f[i, j].T, f[i - 1, j].T, f[i, j - 1].T, f[i - 1, j - 1].T]
)
xyq = jnp.array([xq, yq])
dist = jnp.linalg.norm(neighbors_x - xyq[:, None, :], axis=0)
idx = jnp.argmin(dist, axis=0)
return jax.vmap(lambda a, b: jnp.take(a, b, axis=-1))(neighbors_f.T, idx)
def derivative1():
return jnp.zeros((xq.size, *f.shape[2:]))
fq = jax.lax.cond(
(derivative_x == 0) & (derivative_y == 0), derivative0, derivative1
)
elif method == "linear":
i = jnp.clip(jnp.searchsorted(x, xq, side="right"), 1, len(x) - 1)
j = jnp.clip(jnp.searchsorted(y, yq, side="right"), 1, len(y) - 1)
f00 = f[i - 1, j - 1]
f01 = f[i - 1, j]
f10 = f[i, j - 1]
f11 = f[i, j]
x0 = x[i - 1]
x1 = x[i]
y0 = y[j - 1]
y1 = y[j]
dx = x1 - x0
dxi = jnp.where(dx == 0, 0, 1 / dx)
dy = y1 - y0
dyi = jnp.where(dy == 0, 0, 1 / dy)
dx0 = lambda: jnp.array([x1 - xq, xq - x0])
dx1 = lambda: jnp.array([-jnp.ones_like(xq), jnp.ones_like(xq)])
dx2 = lambda: jnp.zeros((2, xq.size))
dy0 = lambda: jnp.array([y1 - yq, yq - y0])
dy1 = lambda: jnp.array([-jnp.ones_like(yq), jnp.ones_like(yq)])
dy2 = lambda: jnp.zeros((2, yq.size))
tx = jax.lax.switch(derivative_x, [dx0, dx1, dx2])
ty = jax.lax.switch(derivative_y, [dy0, dy1, dy2])
F = jnp.array([[f00, f01], [f10, f11]])
fq = (dxi * dyi * jnp.einsum("ijk...,ik,jk->k...", F, tx, ty).T).T
elif method in CUBIC_METHODS:
if fx is None:
fx = approx_df(x, f, method, 0, **kwargs)
if fy is None:
fy = approx_df(y, f, method, 1, **kwargs)
if fxy is None:
fxy = approx_df(y, fx, method, 1, **kwargs)
assert fx.shape == fy.shape == fxy.shape == f.shape
i = jnp.clip(jnp.searchsorted(x, xq, side="right"), 1, len(x) - 1)
j = jnp.clip(jnp.searchsorted(y, yq, side="right"), 1, len(y) - 1)
dx = x[i] - x[i - 1]
deltax = xq - x[i - 1]
dxi = jnp.where(dx == 0, 0, 1 / dx)
tx = deltax * dxi
dy = y[j] - y[j - 1]
deltay = yq - y[j - 1]
dyi = jnp.where(dy == 0, 0, 1 / dy)
ty = deltay * dyi
fs = OrderedDict()
fs["f"] = f
fs["fx"] = fx
fs["fy"] = fy
fs["fxy"] = fxy
fsq = OrderedDict()
for ff in fs.keys():
for jj in [0, 1]:
for ii in [0, 1]:
s = ff + str(ii) + str(jj)
fsq[s] = fs[ff][i - 1 + ii, j - 1 + jj]
if "x" in ff:
fsq[s] = (dx * fsq[s].T).T
if "y" in ff:
fsq[s] = (dy * fsq[s].T).T
F = jnp.stack([foo for foo in fsq.values()], axis=0).T
coef = jnp.vectorize(jnp.matmul, signature="(n,n),(n)->(n)")(A_BICUBIC, F).T
coef = jnp.moveaxis(coef.reshape((4, 4, *coef.shape[1:]), order="F"), 2, 0)
ttx = _get_t_der(tx, derivative_x, dxi)
tty = _get_t_der(ty, derivative_y, dyi)
fq = jnp.einsum("ijk...,ij,ik->i...", coef, ttx, tty)
fq = _extrap(xq, fq, x, lowx, highx)
fq = _extrap(yq, fq, y, lowy, highy)
return fq.reshape(outshape)
@partial(jit, static_argnames="method")
def interp3d( # noqa: C901 - FIXME: break this up into simpler pieces
xq: jax.Array,
yq: jax.Array,
zq: jax.Array,
x: jax.Array,
y: jax.Array,
z: jax.Array,
f: jax.Array,
method: str = "cubic",
derivative: int = 0,
extrap: Union[bool, float, tuple] = False,
period: Union[None, float, tuple] = None,
**kwargs,
):
"""Interpolate a 3d function.
Parameters
----------
xq : ndarray, shape(Nq,)
x query points where interpolation is desired
yq : ndarray, shape(Nq,)
y query points where interpolation is desired
zq : ndarray, shape(Nq,)
z query points where interpolation is desired
x : ndarray, shape(Nx,)
x coordinates of known function values ("knots")
y : ndarray, shape(Ny,)
y coordinates of known function values ("knots")
z : ndarray, shape(Nz,)
z coordinates of known function values ("knots")
f : ndarray, shape(Nx,Ny,Nz,...)
function values to interpolate
method : str
method of interpolation
- ``'nearest'``: nearest neighbor interpolation
- ``'linear'``: linear interpolation
- ``'cubic'``: C1 cubic splines (aka local splines)
- ``'cubic2'``: C2 cubic splines (aka natural splines)
- ``'catmull-rom'``: C1 cubic centripetal "tension" splines
- ``'cardinal'``: C1 cubic general tension splines. If used, can also pass
keyword parameter ``c`` in float[0,1] to specify tension
derivative : int >= 0, array-like, shape(3,)
derivative order to calculate in x,y,z directions. Use a single value for the
same in all directions.
extrap : bool, float, array-like
whether to extrapolate values beyond knots (True) or return nan (False),
or a specified value to return for query points outside the bounds. Can
also be passed as an array or tuple to specify different conditions for
[[xlow, xhigh],[ylow,yhigh],[zlow,zhigh]]
period : float > 0, None, array-like, shape(3,)
periodicity of the function in x, y, z directions. None denotes no periodicity,
otherwise function is assumed to be periodic on the interval [0,period]. Use a
single value for the same in all directions.
Returns
-------
fq : ndarray, shape(Nq,...)
function value at query points
Notes
-----
For repeated interpolation given the same x, y, z, f data, recommend using
Interpolator3D which caches the calculation of the derivatives and spline
coefficients.
"""
xq, yq, zq, x, y, z, f = map(jnp.asarray, (xq, yq, zq, x, y, z, f))
errorif(
(len(x) != f.shape[0]) or (x.ndim != 1),
ValueError,
"x and f must be arrays of equal length",
)
errorif(
(len(y) != f.shape[1]) or (y.ndim != 1),
ValueError,
"y and f must be arrays of equal length",
)
errorif(
(len(z) != f.shape[2]) or (z.ndim != 1),
ValueError,
"z and f must be arrays of equal length",
)
errorif(method not in METHODS_3D, ValueError, f"unknown method {method}")
xq, yq, zq = jnp.broadcast_arrays(xq, yq, zq)
outshape = xq.shape + f.shape[3:]
# Promote scalar query points to 1D array.
# Note this is done after the computation of outshape
# to make jax.grad work in the scalar case.
xq, yq, zq = map(jnp.atleast_1d, (xq, yq, zq))
fx = kwargs.pop("fx", None)
fy = kwargs.pop("fy", None)
fz = kwargs.pop("fz", None)
fxy = kwargs.pop("fxy", None)
fxz = kwargs.pop("fxz", None)
fyz = kwargs.pop("fyz", None)
fxyz = kwargs.pop("fxyz", None)
periodx, periody, periodz = _parse_ndarg(period, 3)
derivative_x, derivative_y, derivative_z = _parse_ndarg(derivative, 3)
lowx, highx, lowy, highy, lowz, highz = _parse_extrap(extrap, 3)
if periodx is not None:
xq, x, f, fx, fy, fz, fxy, fxz, fyz, fxyz = _make_periodic(
xq, x, periodx, 0, f, fx, fy, fz, fxy, fxz, fyz, fxyz
)
lowx = highx = True
if periody is not None:
yq, y, f, fx, fy, fz, fxy, fxz, fyz, fxyz = _make_periodic(
yq, y, periody, 1, f, fx, fy, fz, fxy, fxz, fyz, fxyz
)
lowy = highy = True
if periodz is not None:
zq, z, f, fx, fy, fz, fxy, fxz, fyz, fxyz = _make_periodic(
zq, z, periodz, 2, f, fx, fy, fz, fxy, fxz, fyz, fxyz
)
lowz = highz = True
if method == "nearest":
def derivative0():
# because of the regular spaced grid we know that the nearest point
# will be one of the 8 neighbors on the grid, so we first find those
# and then take the nearest one among them.
i = jnp.clip(jnp.searchsorted(x, xq, side="right"), 1, len(x) - 1)
j = jnp.clip(jnp.searchsorted(y, yq, side="right"), 1, len(y) - 1)
k = jnp.clip(jnp.searchsorted(z, zq, side="right"), 1, len(z) - 1)
neighbors_x = jnp.array(
[
[x[i], x[i - 1], x[i], x[i - 1], x[i], x[i - 1], x[i], x[i - 1]],
[y[j], y[j], y[j - 1], y[j - 1], y[j], y[j], y[j - 1], y[j - 1]],
[z[k], z[k], z[k], z[k], z[k - 1], z[k - 1], z[k - 1], z[k - 1]],
]
)
neighbors_f = jnp.array(
[
f[i, j, k].T,
f[i - 1, j, k].T,
f[i, j - 1, k].T,
f[i - 1, j - 1, k].T,
f[i, j, k - 1].T,
f[i - 1, j, k - 1].T,
f[i, j - 1, k - 1].T,
f[i - 1, j - 1, k - 1].T,
]
)
xyzq = jnp.array([xq, yq, zq])
dist = jnp.linalg.norm(neighbors_x - xyzq[:, None, :], axis=0)
idx = jnp.argmin(dist, axis=0)
return jax.vmap(lambda a, b: jnp.take(a, b, axis=-1))(neighbors_f.T, idx)
def derivative1():
return jnp.zeros((xq.size, *f.shape[3:]))
fq = jax.lax.cond(
(derivative_x == 0) & (derivative_y == 0) & (derivative_z == 0),
derivative0,
derivative1,
)
elif method == "linear":
i = jnp.clip(jnp.searchsorted(x, xq, side="right"), 1, len(x) - 1)
j = jnp.clip(jnp.searchsorted(y, yq, side="right"), 1, len(y) - 1)
k = jnp.clip(jnp.searchsorted(z, zq, side="right"), 1, len(z) - 1)
f000 = f[i - 1, j - 1, k - 1]
f001 = f[i - 1, j - 1, k]
f010 = f[i - 1, j, k - 1]
f100 = f[i, j - 1, k - 1]
f110 = f[i, j, k - 1]
f011 = f[i - 1, j, k]
f101 = f[i, j - 1, k]
f111 = f[i, j, k]
x0 = x[i - 1]
x1 = x[i]
y0 = y[j - 1]
y1 = y[j]
z0 = z[k - 1]
z1 = z[k]
dx = x1 - x0
dxi = jnp.where(dx == 0, 0, 1 / dx)
dy = y1 - y0
dyi = jnp.where(dy == 0, 0, 1 / dy)
dz = z1 - z0
dzi = jnp.where(dz == 0, 0, 1 / dz)
dx0 = lambda: jnp.array([x1 - xq, xq - x0])
dx1 = lambda: jnp.array([-jnp.ones_like(xq), jnp.ones_like(xq)])
dx2 = lambda: jnp.zeros((2, xq.size))
dy0 = lambda: jnp.array([y1 - yq, yq - y0])
dy1 = lambda: jnp.array([-jnp.ones_like(yq), jnp.ones_like(yq)])
dy2 = lambda: jnp.zeros((2, yq.size))
dz0 = lambda: jnp.array([z1 - zq, zq - z0])
dz1 = lambda: jnp.array([-jnp.ones_like(zq), jnp.ones_like(zq)])
dz2 = lambda: jnp.zeros((2, zq.size))
tx = jax.lax.switch(derivative_x, [dx0, dx1, dx2])
ty = jax.lax.switch(derivative_y, [dy0, dy1, dy2])
tz = jax.lax.switch(derivative_z, [dz0, dz1, dz2])
F = jnp.array([[[f000, f001], [f010, f011]], [[f100, f101], [f110, f111]]])
fq = (dxi * dyi * dzi * jnp.einsum("lijk...,lk,ik,jk->k...", F, tx, ty, tz).T).T
elif method in CUBIC_METHODS:
if fx is None:
fx = approx_df(x, f, method, 0, **kwargs)
if fy is None:
fy = approx_df(y, f, method, 1, **kwargs)
if fz is None:
fz = approx_df(z, f, method, 2, **kwargs)
if fxy is None:
fxy = approx_df(y, fx, method, 1, **kwargs)
if fxz is None:
fxz = approx_df(z, fx, method, 2, **kwargs)
if fyz is None:
fyz = approx_df(z, fy, method, 2, **kwargs)
if fxyz is None:
fxyz = approx_df(z, fxy, method, 2, **kwargs)
assert (
fx.shape
== fy.shape
== fz.shape
== fxy.shape
== fxz.shape
== fyz.shape
== fxyz.shape
== f.shape
)
i = jnp.clip(jnp.searchsorted(x, xq, side="right"), 1, len(x) - 1)
j = jnp.clip(jnp.searchsorted(y, yq, side="right"), 1, len(y) - 1)
k = jnp.clip(jnp.searchsorted(z, zq, side="right"), 1, len(z) - 1)
dx = x[i] - x[i - 1]
deltax = xq - x[i - 1]
dxi = jnp.where(dx == 0, 0, 1 / dx)
tx = deltax * dxi
dy = y[j] - y[j - 1]
deltay = yq - y[j - 1]
dyi = jnp.where(dy == 0, 0, 1 / dy)
ty = deltay * dyi
dz = z[k] - z[k - 1]
deltaz = zq - z[k - 1]
dzi = jnp.where(dz == 0, 0, 1 / dz)
tz = deltaz * dzi
fs = OrderedDict()
fs["f"] = f
fs["fx"] = fx
fs["fy"] = fy
fs["fz"] = fz
fs["fxy"] = fxy
fs["fxz"] = fxz
fs["fyz"] = fyz
fs["fxyz"] = fxyz
fsq = OrderedDict()
for ff in fs.keys():
for kk in [0, 1]:
for jj in [0, 1]:
for ii in [0, 1]:
s = ff + str(ii) + str(jj) + str(kk)
fsq[s] = fs[ff][i - 1 + ii, j - 1 + jj, k - 1 + kk]
if "x" in ff:
fsq[s] = (dx * fsq[s].T).T
if "y" in ff:
fsq[s] = (dy * fsq[s].T).T
if "z" in ff:
fsq[s] = (dz * fsq[s].T).T
F = jnp.stack([foo for foo in fsq.values()], axis=0).T
coef = jnp.vectorize(jnp.matmul, signature="(n,n),(n)->(n)")(A_TRICUBIC, F).T
coef = jnp.moveaxis(coef.reshape((4, 4, 4, *coef.shape[1:]), order="F"), 3, 0)
ttx = _get_t_der(tx, derivative_x, dxi)
tty = _get_t_der(ty, derivative_y, dyi)
ttz = _get_t_der(tz, derivative_z, dzi)
fq = jnp.einsum("lijk...,li,lj,lk->l...", coef, ttx, tty, ttz)
fq = _extrap(xq, fq, x, lowx, highx)
fq = _extrap(yq, fq, y, lowy, highy)
fq = _extrap(zq, fq, z, lowz, highz)
return fq.reshape(outshape)
@partial(jit, static_argnames=("axis"))
def _make_periodic(xq: jax.Array, x: jax.Array, period: float, axis: int, *arrs):
"""Make arrays periodic along a specified axis."""
period = abs(period)
xq = xq % period
x = x % period
i = jnp.argsort(x)
x = x[i]
x = jnp.concatenate([x[-1:] - period, x, x[:1] + period])
arrs = list(arrs)
for k in range(len(arrs)):
if arrs[k] is not None:
arrs[k] = jnp.take(arrs[k], i, axis, mode="wrap")
arrs[k] = jnp.concatenate(
[
jnp.take(arrs[k], jnp.array([-1]), axis),
arrs[k],
jnp.take(arrs[k], jnp.array([0]), axis),
],
axis=axis,
)
return (xq, x, *arrs)
@jit
def _get_t_der(t: jax.Array, derivative: int, dxi: jax.Array):
"""Get arrays of [1,t,t^2,t^3] for cubic interpolation."""
t0 = jnp.zeros_like(t)
t1 = jnp.ones_like(t)
dxi = jnp.atleast_1d(dxi)[:, None]
# derivatives of monomials
d0 = lambda: jnp.array([t1, t, t**2, t**3]).T * dxi**0
d1 = lambda: jnp.array([t0, t1, 2 * t, 3 * t**2]).T * dxi
d2 = lambda: jnp.array([t0, t0, 2 * t1, 6 * t]).T * dxi**2
d3 = lambda: jnp.array([t0, t0, t0, 6 * t1]).T * dxi**3
d4 = lambda: jnp.array([t0, t0, t0, t0]).T * (dxi * 0)
return jax.lax.switch(derivative, [d0, d1, d2, d3, d4])
def _parse_ndarg(arg, n):
try:
k = len(arg)
except TypeError:
arg = tuple(arg for _ in range(n))
k = n
assert k == n, "got too many args"
return arg
def _parse_extrap(extrap, n): | if isbool(extrap): # same for lower,upper in all dimensions | 1 | 2023-10-18 13:12:20+00:00 | 8k |
city96/ComfyUI_ExtraModels | VAE/models/temporal_ae.py | [
{
"identifier": "Encoder",
"path": "VAE/models/kl.py",
"snippet": "class Encoder(nn.Module):\n\tdef __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,\n\t\t\t\t attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,\n\t\t\t\t resolution, z_channels, double_z=True, use_linear_attn=False, attn_type=\"vanilla\",\n\t\t\t\t **ignore_kwargs):\n\t\tsuper().__init__()\n\t\tif use_linear_attn: attn_type = \"linear\"\n\t\tself.ch = ch\n\t\tself.temb_ch = 0\n\t\tself.num_resolutions = len(ch_mult)\n\t\tself.num_res_blocks = num_res_blocks\n\t\tself.resolution = resolution\n\t\tself.in_channels = in_channels\n\n\t\t# downsampling\n\t\tself.conv_in = torch.nn.Conv2d(in_channels,\n\t\t\t\t\t\t\t\t\t self.ch,\n\t\t\t\t\t\t\t\t\t kernel_size=3,\n\t\t\t\t\t\t\t\t\t stride=1,\n\t\t\t\t\t\t\t\t\t padding=1)\n\n\t\tcurr_res = resolution\n\t\tin_ch_mult = (1,)+tuple(ch_mult)\n\t\tself.in_ch_mult = in_ch_mult\n\t\tself.down = nn.ModuleList()\n\t\tfor i_level in range(self.num_resolutions):\n\t\t\tblock = nn.ModuleList()\n\t\t\tattn = nn.ModuleList()\n\t\t\tblock_in = ch*in_ch_mult[i_level]\n\t\t\tblock_out = ch*ch_mult[i_level]\n\t\t\tfor i_block in range(self.num_res_blocks):\n\t\t\t\tblock.append(ResnetBlock(in_channels=block_in,\n\t\t\t\t\t\t\t\t\t\t out_channels=block_out,\n\t\t\t\t\t\t\t\t\t\t temb_channels=self.temb_ch,\n\t\t\t\t\t\t\t\t\t\t dropout=dropout))\n\t\t\t\tblock_in = block_out\n\t\t\t\tif curr_res in attn_resolutions:\n\t\t\t\t\tattn.append(make_attn(block_in, attn_type=attn_type))\n\t\t\tdown = nn.Module()\n\t\t\tdown.block = block\n\t\t\tdown.attn = attn\n\t\t\tif i_level != self.num_resolutions-1:\n\t\t\t\tdown.downsample = Downsample(block_in, resamp_with_conv)\n\t\t\t\tcurr_res = curr_res // 2\n\t\t\tself.down.append(down)\n\n\t\t# middle\n\t\tself.mid = nn.Module()\n\t\tself.mid.block_1 = ResnetBlock(in_channels=block_in,\n\t\t\t\t\t\t\t\t\t out_channels=block_in,\n\t\t\t\t\t\t\t\t\t temb_channels=self.temb_ch,\n\t\t\t\t\t\t\t\t\t dropout=dropout)\n\t\tself.mid.attn_1 = make_attn(block_in, attn_type=attn_type)\n\t\tself.mid.block_2 = ResnetBlock(in_channels=block_in,\n\t\t\t\t\t\t\t\t\t out_channels=block_in,\n\t\t\t\t\t\t\t\t\t temb_channels=self.temb_ch,\n\t\t\t\t\t\t\t\t\t dropout=dropout)\n\n\t\t# end\n\t\tself.norm_out = Normalize(block_in)\n\t\tself.conv_out = torch.nn.Conv2d(block_in,\n\t\t\t\t\t\t\t\t\t\t2*z_channels if double_z else z_channels,\n\t\t\t\t\t\t\t\t\t\tkernel_size=3,\n\t\t\t\t\t\t\t\t\t\tstride=1,\n\t\t\t\t\t\t\t\t\t\tpadding=1)\n\n\tdef forward(self, x):\n\t\t# timestep embedding\n\t\ttemb = None\n\n\t\t# downsampling\n\t\ths = [self.conv_in(x)]\n\t\tfor i_level in range(self.num_resolutions):\n\t\t\tfor i_block in range(self.num_res_blocks):\n\t\t\t\th = self.down[i_level].block[i_block](hs[-1], temb)\n\t\t\t\tif len(self.down[i_level].attn) > 0:\n\t\t\t\t\th = self.down[i_level].attn[i_block](h)\n\t\t\t\ths.append(h)\n\t\t\tif i_level != self.num_resolutions-1:\n\t\t\t\ths.append(self.down[i_level].downsample(hs[-1]))\n\n\t\t# middle\n\t\th = hs[-1]\n\t\th = self.mid.block_1(h, temb)\n\t\th = self.mid.attn_1(h)\n\t\th = self.mid.block_2(h, temb)\n\n\t\t# end\n\t\th = self.norm_out(h)\n\t\th = nonlinearity(h)\n\t\th = self.conv_out(h)\n\t\treturn h"
},
{
"identifier": "Decoder",
"path": "VAE/models/kl.py",
"snippet": "class Decoder(nn.Module):\n\tdef __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,\n\t\t\t\t attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,\n\t\t\t\t resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,\n\t\t\t\t attn_type=\"vanilla\", post_quant_conv=None, **ignorekwargs):\n\t\tsuper().__init__()\n\t\tif use_linear_attn: attn_type = \"linear\"\n\t\tself.ch = ch\n\t\tself.temb_ch = 0\n\t\tself.num_resolutions = len(ch_mult)\n\t\tself.num_res_blocks = num_res_blocks\n\t\tself.resolution = resolution\n\t\tself.in_channels = in_channels\n\t\tself.give_pre_end = give_pre_end\n\t\tself.tanh_out = tanh_out\n\t\tself.post_quant_conv = post_quant_conv\n\n\t\t# compute in_ch_mult, block_in and curr_res at lowest res\n\t\tin_ch_mult = (1,)+tuple(ch_mult)\n\t\tblock_in = ch*ch_mult[self.num_resolutions-1]\n\t\tcurr_res = resolution // 2**(self.num_resolutions-1)\n\t\tself.z_shape = (1,z_channels,curr_res,curr_res)\n\t\tprint(\"Working with z of shape {} = {} dimensions.\".format(\n\t\t\tself.z_shape, np.prod(self.z_shape)))\n\n\t\t# z to block_in\n\t\tself.conv_in = torch.nn.Conv2d(z_channels,\n\t\t\t\t\t\t\t\t\t block_in,\n\t\t\t\t\t\t\t\t\t kernel_size=3,\n\t\t\t\t\t\t\t\t\t stride=1,\n\t\t\t\t\t\t\t\t\t padding=1)\n\n\t\t# middle\n\t\tself.mid = nn.Module()\n\t\tself.mid.block_1 = ResnetBlock(in_channels=block_in,\n\t\t\t\t\t\t\t\t\t out_channels=block_in,\n\t\t\t\t\t\t\t\t\t temb_channels=self.temb_ch,\n\t\t\t\t\t\t\t\t\t dropout=dropout)\n\t\tself.mid.attn_1 = make_attn(block_in, attn_type=attn_type)\n\t\tself.mid.block_2 = ResnetBlock(in_channels=block_in,\n\t\t\t\t\t\t\t\t\t out_channels=block_in,\n\t\t\t\t\t\t\t\t\t temb_channels=self.temb_ch,\n\t\t\t\t\t\t\t\t\t dropout=dropout)\n\n\t\t# upsampling\n\t\tself.up = nn.ModuleList()\n\t\tfor i_level in reversed(range(self.num_resolutions)):\n\t\t\tblock = nn.ModuleList()\n\t\t\tattn = nn.ModuleList()\n\t\t\tblock_out = ch*ch_mult[i_level]\n\t\t\tfor i_block in range(self.num_res_blocks+1):\n\t\t\t\tblock.append(ResnetBlock(in_channels=block_in,\n\t\t\t\t\t\t\t\t\t\t out_channels=block_out,\n\t\t\t\t\t\t\t\t\t\t temb_channels=self.temb_ch,\n\t\t\t\t\t\t\t\t\t\t dropout=dropout))\n\t\t\t\tblock_in = block_out\n\t\t\t\tif curr_res in attn_resolutions:\n\t\t\t\t\tattn.append(make_attn(block_in, attn_type=attn_type))\n\t\t\tup = nn.Module()\n\t\t\tup.block = block\n\t\t\tup.attn = attn\n\t\t\tif i_level != 0:\n\t\t\t\tup.upsample = Upsample(block_in, resamp_with_conv)\n\t\t\t\tcurr_res = curr_res * 2\n\t\t\tself.up.insert(0, up) # prepend to get consistent order\n\n\t\t# end\n\t\tself.norm_out = Normalize(block_in)\n\t\tself.conv_out = torch.nn.Conv2d(block_in,\n\t\t\t\t\t\t\t\t\t\tout_ch,\n\t\t\t\t\t\t\t\t\t\tkernel_size=3,\n\t\t\t\t\t\t\t\t\t\tstride=1,\n\t\t\t\t\t\t\t\t\t\tpadding=1)\n\n\tdef forward(self, z):\n\t\t#assert z.shape[1:] == self.z_shape[1:]\n\t\tself.last_z_shape = z.shape\n\n\t\t# timestep embedding\n\t\ttemb = None\n\n\t\t# z to block_in\n\t\th = self.conv_in(z)\n\n\t\t# middle\n\t\th = self.mid.block_1(h, temb)\n\t\th = self.mid.attn_1(h)\n\t\th = self.mid.block_2(h, temb)\n\n\t\t# upsampling\n\t\tfor i_level in reversed(range(self.num_resolutions)):\n\t\t\tfor i_block in range(self.num_res_blocks+1):\n\t\t\t\th = self.up[i_level].block[i_block](h, temb)\n\t\t\t\tif len(self.up[i_level].attn) > 0:\n\t\t\t\t\th = self.up[i_level].attn[i_block](h)\n\t\t\tif i_level != 0:\n\t\t\t\th = self.up[i_level].upsample(h)\n\n\t\t# end\n\t\tif self.give_pre_end:\n\t\t\treturn h\n\n\t\th = self.norm_out(h)\n\t\th = nonlinearity(h)\n\t\th = self.conv_out(h)\n\t\tif self.tanh_out:\n\t\t\th = torch.tanh(h)\n\t\treturn h"
},
{
"identifier": "Upsample",
"path": "VAE/models/kl.py",
"snippet": "class Upsample(nn.Module):\n\tdef __init__(self, in_channels, with_conv):\n\t\tsuper().__init__()\n\t\tself.with_conv = with_conv\n\t\tif self.with_conv:\n\t\t\tself.conv = torch.nn.Conv2d(in_channels,\n\t\t\t\t\t\t\t\t\t\tin_channels,\n\t\t\t\t\t\t\t\t\t\tkernel_size=3,\n\t\t\t\t\t\t\t\t\t\tstride=1,\n\t\t\t\t\t\t\t\t\t\tpadding=1)\n\n\tdef forward(self, x):\n\t\t# BF16 fix\n\t\txh = x.to(torch.float32)\n\t\txh = torch.nn.functional.interpolate(xh, scale_factor=2.0, mode=\"nearest\")\n\t\tx = xh.to(x.dtype)\n\n\t\tif self.with_conv:\n\t\t\tx = self.conv(x)\n\t\treturn x"
},
{
"identifier": "Normalize",
"path": "VAE/models/kl.py",
"snippet": "def Normalize(in_channels, num_groups=32):\n\treturn torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True)"
},
{
"identifier": "AttnBlock",
"path": "VAE/models/kl.py",
"snippet": "class AttnBlock(nn.Module):\n\tdef __init__(self, in_channels):\n\t\tsuper().__init__()\n\t\tself.in_channels = in_channels\n\n\t\tself.norm = Normalize(in_channels)\n\t\tself.q = torch.nn.Conv2d(in_channels,\n\t\t\t\t\t\t\t\t in_channels,\n\t\t\t\t\t\t\t\t kernel_size=1,\n\t\t\t\t\t\t\t\t stride=1,\n\t\t\t\t\t\t\t\t padding=0)\n\t\tself.k = torch.nn.Conv2d(in_channels,\n\t\t\t\t\t\t\t\t in_channels,\n\t\t\t\t\t\t\t\t kernel_size=1,\n\t\t\t\t\t\t\t\t stride=1,\n\t\t\t\t\t\t\t\t padding=0)\n\t\tself.v = torch.nn.Conv2d(in_channels,\n\t\t\t\t\t\t\t\t in_channels,\n\t\t\t\t\t\t\t\t kernel_size=1,\n\t\t\t\t\t\t\t\t stride=1,\n\t\t\t\t\t\t\t\t padding=0)\n\t\tself.proj_out = torch.nn.Conv2d(in_channels,\n\t\t\t\t\t\t\t\t\t\tin_channels,\n\t\t\t\t\t\t\t\t\t\tkernel_size=1,\n\t\t\t\t\t\t\t\t\t\tstride=1,\n\t\t\t\t\t\t\t\t\t\tpadding=0)\n\n\n\tdef forward(self, x):\n\t\th_ = x\n\t\th_ = self.norm(h_)\n\t\tq = self.q(h_)\n\t\tk = self.k(h_)\n\t\tv = self.v(h_)\n\n\t\t# compute attention\n\t\tb,c,h,w = q.shape\n\t\tq = q.reshape(b,c,h*w)\n\t\tq = q.permute(0,2,1) # b,hw,c\n\t\tk = k.reshape(b,c,h*w) # b,c,hw\n\t\tw_ = torch.bmm(q,k)\t # b,hw,hw\tw[b,i,j]=sum_c q[b,i,c]k[b,c,j]\n\t\t# w_ = w_ * (int(c)**(-0.5))\n\t\tw_ = w_ * (c**(-0.5))\n\t\tw_ = torch.nn.functional.softmax(w_, dim=2)\n\n\t\t# attend to values\n\t\tv = v.reshape(b,c,h*w)\n\t\tw_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q)\n\t\th_ = torch.bmm(v,w_)\t # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]\n\t\th_ = h_.reshape(b,c,h,w)\n\n\t\th_ = self.proj_out(h_)\n\n\t\treturn x+h_"
},
{
"identifier": "ResnetBlock",
"path": "VAE/models/kl.py",
"snippet": "class ResnetBlock(nn.Module):\n\tdef __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,\n\t\t\t\t dropout, temb_channels=512):\n\t\tsuper().__init__()\n\t\tself.in_channels = in_channels\n\t\tout_channels = in_channels if out_channels is None else out_channels\n\t\tself.out_channels = out_channels\n\t\tself.use_conv_shortcut = conv_shortcut\n\n\t\tself.norm1 = Normalize(in_channels)\n\t\tself.conv1 = torch.nn.Conv2d(in_channels,\n\t\t\t\t\t\t\t\t\t out_channels,\n\t\t\t\t\t\t\t\t\t kernel_size=3,\n\t\t\t\t\t\t\t\t\t stride=1,\n\t\t\t\t\t\t\t\t\t padding=1)\n\t\tif temb_channels > 0:\n\t\t\tself.temb_proj = torch.nn.Linear(temb_channels,\n\t\t\t\t\t\t\t\t\t\t\t out_channels)\n\t\tself.norm2 = Normalize(out_channels)\n\t\tself.dropout = torch.nn.Dropout(dropout)\n\t\tself.conv2 = torch.nn.Conv2d(out_channels,\n\t\t\t\t\t\t\t\t\t out_channels,\n\t\t\t\t\t\t\t\t\t kernel_size=3,\n\t\t\t\t\t\t\t\t\t stride=1,\n\t\t\t\t\t\t\t\t\t padding=1)\n\t\tif self.in_channels != self.out_channels:\n\t\t\tif self.use_conv_shortcut:\n\t\t\t\tself.conv_shortcut = torch.nn.Conv2d(in_channels,\n\t\t\t\t\t\t\t\t\t\t\t\t\t out_channels,\n\t\t\t\t\t\t\t\t\t\t\t\t\t kernel_size=3,\n\t\t\t\t\t\t\t\t\t\t\t\t\t stride=1,\n\t\t\t\t\t\t\t\t\t\t\t\t\t padding=1)\n\t\t\telse:\n\t\t\t\tself.nin_shortcut = torch.nn.Conv2d(in_channels,\n\t\t\t\t\t\t\t\t\t\t\t\t\tout_channels,\n\t\t\t\t\t\t\t\t\t\t\t\t\tkernel_size=1,\n\t\t\t\t\t\t\t\t\t\t\t\t\tstride=1,\n\t\t\t\t\t\t\t\t\t\t\t\t\tpadding=0)\n\n\tdef forward(self, x, temb):\n\t\th = x\n\t\th = self.norm1(h)\n\t\th = nonlinearity(h)\n\t\th = self.conv1(h)\n\n\t\tif temb is not None:\n\t\t\th = h + self.temb_proj(nonlinearity(temb))[:,:,None,None]\n\n\t\th = self.norm2(h)\n\t\th = nonlinearity(h)\n\t\th = self.dropout(h)\n\t\th = self.conv2(h)\n\n\t\tif self.in_channels != self.out_channels:\n\t\t\tif self.use_conv_shortcut:\n\t\t\t\tx = self.conv_shortcut(x)\n\t\t\telse:\n\t\t\t\tx = self.nin_shortcut(x)\n\n\t\treturn x+h"
},
{
"identifier": "DiagonalGaussianDistribution",
"path": "VAE/models/kl.py",
"snippet": "class DiagonalGaussianDistribution(object):\n\tdef __init__(self, parameters, deterministic=False):\n\t\tself.parameters = parameters\n\t\tself.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n\t\tself.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n\t\tself.deterministic = deterministic\n\t\tself.std = torch.exp(0.5 * self.logvar)\n\t\tself.var = torch.exp(self.logvar)\n\t\tif self.deterministic:\n\t\t\tself.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n\tdef sample(self):\n\t\tx = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n\t\treturn x\n\n\tdef kl(self, other=None):\n\t\tif self.deterministic:\n\t\t\treturn torch.Tensor([0.])\n\t\telse:\n\t\t\tif other is None:\n\t\t\t\treturn 0.5 * torch.sum(torch.pow(self.mean, 2)\n\t\t\t\t\t\t\t\t\t + self.var - 1.0 - self.logvar,\n\t\t\t\t\t\t\t\t\t dim=[1, 2, 3])\n\t\t\telse:\n\t\t\t\treturn 0.5 * torch.sum(\n\t\t\t\t\ttorch.pow(self.mean - other.mean, 2) / other.var\n\t\t\t\t\t+ self.var / other.var - 1.0 - self.logvar + other.logvar,\n\t\t\t\t\tdim=[1, 2, 3])\n\n\tdef nll(self, sample, dims=[1,2,3]):\n\t\tif self.deterministic:\n\t\t\treturn torch.Tensor([0.])\n\t\tlogtwopi = np.log(2.0 * np.pi)\n\t\treturn 0.5 * torch.sum(\n\t\t\tlogtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n\t\t\tdim=dims)\n\n\tdef mode(self):\n\t\treturn self.mean"
},
{
"identifier": "nonlinearity",
"path": "VAE/models/kl.py",
"snippet": "def nonlinearity(x): # swish\n\treturn x*torch.sigmoid(x)"
},
{
"identifier": "make_attn",
"path": "VAE/models/kl.py",
"snippet": "def make_attn(in_channels, attn_type=\"vanilla\"):\n\tassert attn_type in [\"vanilla\", \"linear\", \"none\"], f'attn_type {attn_type} unknown'\n\tprint(f\"making attention of type '{attn_type}' with {in_channels} in_channels\")\n\tif attn_type == \"vanilla\":\n\t\treturn AttnBlock(in_channels)\n\telif attn_type == \"none\":\n\t\treturn nn.Identity(in_channels)\n\telse:\n\t\treturn LinAttnBlock(in_channels)"
}
] | import math
import torch
import numpy as np
from torch import nn
from typing import Callable, Iterable, Union, Optional
from einops import rearrange, repeat
from comfy import model_management
from .kl import (
Encoder, Decoder, Upsample, Normalize,
AttnBlock, ResnetBlock, #MemoryEfficientAttnBlock,
DiagonalGaussianDistribution, nonlinearity, make_attn
) | 5,441 | # all_out.append(dec)
# out = torch.cat(all_out, dim=0)
## default
out = self.decoder(
z, timesteps=len(z)
)
return out
def forward(self, input, sample_posterior=True):
posterior = self.encode(input)
if sample_posterior:
z = posterior.sample()
else:
z = posterior.mode()
dec = self.decode(z)
return dec, posterior
class VideoDecoder(nn.Module):
available_time_modes = ["all", "conv-only", "attn-only"]
def __init__(
self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
attn_type="vanilla",
video_kernel_size: Union[int, list] = 3, alpha: float = 0.0, merge_strategy: str = "learned", time_mode: str = "conv-only",
**ignorekwargs
):
super().__init__()
if use_linear_attn: attn_type = "linear"
self.ch = ch
self.temb_ch = 0
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
self.resolution = resolution
self.in_channels = in_channels
self.give_pre_end = give_pre_end
self.tanh_out = tanh_out
self.video_kernel_size = video_kernel_size
self.alpha = alpha
self.merge_strategy = merge_strategy
self.time_mode = time_mode
assert (
self.time_mode in self.available_time_modes
), f"time_mode parameter has to be in {self.available_time_modes}"
# compute in_ch_mult, block_in and curr_res at lowest res
in_ch_mult = (1,)+tuple(ch_mult)
block_in = ch*ch_mult[self.num_resolutions-1]
curr_res = resolution // 2**(self.num_resolutions-1)
self.z_shape = (1,z_channels,curr_res,curr_res)
print("Working with z of shape {} = {} dimensions.".format(
self.z_shape, np.prod(self.z_shape)))
# z to block_in
self.conv_in = torch.nn.Conv2d(
z_channels,
block_in,
kernel_size=3,
stride=1,
padding=1
)
# middle
self.mid = nn.Module()
self.mid.block_1 = VideoResBlock(
in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout,
video_kernel_size=self.video_kernel_size,
alpha=self.alpha,
merge_strategy=self.merge_strategy,
)
self.mid.attn_1 = make_attn(
block_in,
attn_type=attn_type,
)
self.mid.block_2 = VideoResBlock(
in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout,
video_kernel_size=self.video_kernel_size,
alpha=self.alpha,
merge_strategy=self.merge_strategy,
)
# upsampling
self.up = nn.ModuleList()
for i_level in reversed(range(self.num_resolutions)):
block = nn.ModuleList()
attn = nn.ModuleList()
block_out = ch*ch_mult[i_level]
for i_block in range(self.num_res_blocks+1):
block.append(VideoResBlock(
in_channels=block_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout,
video_kernel_size=self.video_kernel_size,
alpha=self.alpha,
merge_strategy=self.merge_strategy,
))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(make_attn(
block_in,
attn_type=attn_type,
))
up = nn.Module()
up.block = block
up.attn = attn
if i_level != 0:
up.upsample = Upsample(block_in, resamp_with_conv)
curr_res = curr_res * 2
self.up.insert(0, up) # prepend to get consistent order
# end
|
class AutoencoderKL(nn.Module):
def __init__(self, config):
super().__init__()
self.embed_dim = config["embed_dim"]
self.encoder = Encoder(**config)
self.decoder = VideoDecoder(**config)
assert config["double_z"]
# these aren't used here for some reason
# self.quant_conv = torch.nn.Conv2d(2*config["z_channels"], 2*self.embed_dim, 1)
# self.post_quant_conv = torch.nn.Conv2d(self.embed_dim, config["z_channels"], 1)
def encode(self, x):
## batched
# n_samples = x.shape[0]
# n_rounds = math.ceil(x.shape[0] / n_samples)
# all_out = []
# for n in range(n_rounds):
# h = self.encoder(
# x[n * n_samples : (n + 1) * n_samples]
# )
# moments = h # self.quant_conv(h)
# posterior = DiagonalGaussianDistribution(moments)
# all_out.append(posterior.sample())
# z = torch.cat(all_out, dim=0)
# return z
## default
h = self.encoder(x)
moments = h # self.quant_conv(h)
posterior = DiagonalGaussianDistribution(moments)
return posterior.sample()
def decode(self, z):
## batched - seems the same as default?
# n_samples = z.shape[0]
# n_rounds = math.ceil(z.shape[0] / n_samples)
# all_out = []
# for n in range(n_rounds):
# dec = self.decoder(
# z[n * n_samples : (n + 1) * n_samples],
# timesteps=len(z[n * n_samples : (n + 1) * n_samples]),
# )
# all_out.append(dec)
# out = torch.cat(all_out, dim=0)
## default
out = self.decoder(
z, timesteps=len(z)
)
return out
def forward(self, input, sample_posterior=True):
posterior = self.encode(input)
if sample_posterior:
z = posterior.sample()
else:
z = posterior.mode()
dec = self.decode(z)
return dec, posterior
class VideoDecoder(nn.Module):
available_time_modes = ["all", "conv-only", "attn-only"]
def __init__(
self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
attn_type="vanilla",
video_kernel_size: Union[int, list] = 3, alpha: float = 0.0, merge_strategy: str = "learned", time_mode: str = "conv-only",
**ignorekwargs
):
super().__init__()
if use_linear_attn: attn_type = "linear"
self.ch = ch
self.temb_ch = 0
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
self.resolution = resolution
self.in_channels = in_channels
self.give_pre_end = give_pre_end
self.tanh_out = tanh_out
self.video_kernel_size = video_kernel_size
self.alpha = alpha
self.merge_strategy = merge_strategy
self.time_mode = time_mode
assert (
self.time_mode in self.available_time_modes
), f"time_mode parameter has to be in {self.available_time_modes}"
# compute in_ch_mult, block_in and curr_res at lowest res
in_ch_mult = (1,)+tuple(ch_mult)
block_in = ch*ch_mult[self.num_resolutions-1]
curr_res = resolution // 2**(self.num_resolutions-1)
self.z_shape = (1,z_channels,curr_res,curr_res)
print("Working with z of shape {} = {} dimensions.".format(
self.z_shape, np.prod(self.z_shape)))
# z to block_in
self.conv_in = torch.nn.Conv2d(
z_channels,
block_in,
kernel_size=3,
stride=1,
padding=1
)
# middle
self.mid = nn.Module()
self.mid.block_1 = VideoResBlock(
in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout,
video_kernel_size=self.video_kernel_size,
alpha=self.alpha,
merge_strategy=self.merge_strategy,
)
self.mid.attn_1 = make_attn(
block_in,
attn_type=attn_type,
)
self.mid.block_2 = VideoResBlock(
in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout,
video_kernel_size=self.video_kernel_size,
alpha=self.alpha,
merge_strategy=self.merge_strategy,
)
# upsampling
self.up = nn.ModuleList()
for i_level in reversed(range(self.num_resolutions)):
block = nn.ModuleList()
attn = nn.ModuleList()
block_out = ch*ch_mult[i_level]
for i_block in range(self.num_res_blocks+1):
block.append(VideoResBlock(
in_channels=block_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout,
video_kernel_size=self.video_kernel_size,
alpha=self.alpha,
merge_strategy=self.merge_strategy,
))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(make_attn(
block_in,
attn_type=attn_type,
))
up = nn.Module()
up.block = block
up.attn = attn
if i_level != 0:
up.upsample = Upsample(block_in, resamp_with_conv)
curr_res = curr_res * 2
self.up.insert(0, up) # prepend to get consistent order
# end | self.norm_out = Normalize(block_in) | 3 | 2023-10-20 21:19:44+00:00 | 8k |
aikunyi/FreTS | data_provider/data_factory.py | [
{
"identifier": "Dataset_Covid",
"path": "data_provider/data_loader.py",
"snippet": "class Dataset_Covid(Dataset):\n def __init__(self, root_path, flag='train', size=None,\n features='S', data_path='ETTh1.csv',\n target='OT', scale=True, timeenc=0, freq='h', train_only=False):\n # size [seq_len, label_len, pred_len]\n # info\n if size == None:\n self.seq_len = 24 * 4 * 4\n self.label_len = 24 * 4\n self.pred_len = 24 * 4\n else:\n self.seq_len = size[0]\n self.label_len = size[1]\n self.pred_len = size[2]\n # init\n assert flag in ['train', 'test', 'val']\n type_map = {'train': 0, 'val': 1, 'test': 2}\n self.set_type = type_map[flag]\n\n self.features = features\n self.target = target\n self.scale = scale\n self.timeenc = timeenc\n self.freq = freq\n self.train_only = train_only\n\n self.root_path = root_path\n self.data_path = data_path\n self.__read_data__()\n\n def __read_data__(self):\n self.scaler = StandardScaler()\n df_raw = pd.read_csv(os.path.join(self.root_path,\n self.data_path))\n df_raw = df_raw.dropna()\n\n cols = list(df_raw.columns)\n if self.features == 'S':\n cols.remove(self.target)\n cols.remove('date')\n\n num_train = int(len(df_raw) * (0.6 if not self.train_only else 1))\n num_test = int(len(df_raw) * 0.2)\n num_vali = len(df_raw) - num_train - num_test\n border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len]\n border2s = [num_train, num_train + num_vali, len(df_raw)]\n border1 = border1s[self.set_type]\n border2 = border2s[self.set_type]\n\n if self.features == 'M' or self.features == 'MS':\n df_raw = df_raw[['date'] + cols]\n cols_data = df_raw.columns[1:]\n df_data = df_raw[cols_data]\n elif self.features == 'S':\n df_raw = df_raw[['date'] + cols + [self.target]]\n df_data = df_raw[[self.target]]\n\n ## min max scaler\n mms = MinMaxScaler(feature_range=(0, 1))\n if self.scale:\n train_data = df_data[border1s[0]:border2s[0]]\n mms.fit_transform(train_data.values)\n data = mms.fit_transform(df_data.values)\n else:\n data = df_data.values\n\n df_stamp = df_raw[['date']][border1:border2]\n df_stamp['date'] = pd.to_datetime(df_stamp.date)\n if self.timeenc == 0:\n df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)\n df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)\n df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)\n df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)\n data_stamp = df_stamp.drop(['date'], 1).values\n elif self.timeenc == 1:\n data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)\n data_stamp = data_stamp.transpose(1, 0)\n\n self.data_x = data[border1:border2]\n self.data_y = data[border1:border2]\n self.data_stamp = data_stamp\n\n def __getitem__(self, index):\n s_begin = index\n s_end = s_begin + self.seq_len\n r_begin = s_end - self.label_len\n r_end = r_begin + self.label_len + self.pred_len\n\n seq_x = self.data_x[s_begin:s_end]\n seq_y = self.data_y[r_begin:r_end]\n seq_x_mark = self.data_stamp[s_begin:s_end]\n seq_y_mark = self.data_stamp[r_begin:r_end]\n\n return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n def __len__(self):\n return len(self.data_x) - self.seq_len - self.pred_len + 1\n\n def inverse_transform(self, data):\n mms = MinMaxScaler(feature_range=(0, 1))\n return mms.fit_transform(data.cpu())"
},
{
"identifier": "Dataset_Custom",
"path": "data_provider/data_loader.py",
"snippet": "class Dataset_Custom(Dataset):\n def __init__(self, root_path, flag='train', size=None,\n features='S', data_path='ETTh1.csv',\n target='OT', scale=False, timeenc=0, freq='h', train_only=False):\n\n if size == None:\n self.seq_len = 24 * 4 * 4\n self.label_len = 24 * 4\n self.pred_len = 24 * 4\n else:\n self.seq_len = size[0]\n self.label_len = size[1]\n self.pred_len = size[2]\n # init\n assert flag in ['train', 'test', 'val']\n type_map = {'train': 0, 'val': 1, 'test': 2}\n self.set_type = type_map[flag]\n\n self.features = features\n self.target = target\n self.scale = scale\n self.timeenc = timeenc\n self.freq = freq\n self.train_only = train_only\n\n self.root_path = root_path\n self.data_path = data_path\n self.__read_data__()\n\n def __read_data__(self):\n self.scaler = StandardScaler()\n df_raw = pd.read_csv(os.path.join(self.root_path,\n self.data_path))\n df_raw = df_raw.dropna()\n\n cols = list(df_raw.columns)\n if self.features == 'S':\n cols.remove(self.target)\n cols.remove('date')\n\n num_train = int(len(df_raw) * (0.7 if not self.train_only else 1))\n num_test = int(len(df_raw) * 0.1)\n num_vali = len(df_raw) - num_train - num_test\n border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len]\n border2s = [num_train, num_train + num_vali, len(df_raw)]\n border1 = border1s[self.set_type]\n border2 = border2s[self.set_type]\n\n if self.features == 'M' or self.features == 'MS':\n df_raw = df_raw[['date'] + cols]\n cols_data = df_raw.columns[1:]\n df_data = df_raw[cols_data]\n elif self.features == 'S':\n df_raw = df_raw[['date'] + cols + [self.target]]\n df_data = df_raw[[self.target]]\n\n if self.scale:\n train_data = df_data[border1s[0]:border2s[0]]\n self.scaler.fit(train_data.values)\n data = self.scaler.transform(df_data.values)\n else:\n data = df_data.values\n\n df_stamp = df_raw[['date']][border1:border2]\n df_stamp['date'] = pd.to_datetime(df_stamp.date)\n if self.timeenc == 0:\n df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)\n df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)\n df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)\n df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)\n data_stamp = df_stamp.drop(['date'], 1).values\n elif self.timeenc == 1:\n data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)\n data_stamp = data_stamp.transpose(1, 0)\n\n self.data_x = data[border1:border2]\n self.data_y = data[border1:border2]\n self.data_stamp = data_stamp\n\n def __getitem__(self, index):\n s_begin = index\n s_end = s_begin + self.seq_len\n r_begin = s_end - self.label_len\n r_end = r_begin + self.label_len + self.pred_len\n\n seq_x = self.data_x[s_begin:s_end]\n seq_y = self.data_y[r_begin:r_end]\n seq_x_mark = self.data_stamp[s_begin:s_end]\n seq_y_mark = self.data_stamp[r_begin:r_end]\n\n return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n def __len__(self):\n return len(self.data_x) - self.seq_len - self.pred_len + 1\n\n def inverse_transform(self, data):\n return self.scaler.inverse_transform(data)"
},
{
"identifier": "Dataset_Pred",
"path": "data_provider/data_loader.py",
"snippet": "class Dataset_Pred(Dataset):\n def __init__(self, root_path, flag='pred', size=None,\n features='S', data_path='ETTh1.csv',\n target='OT', scale=True, inverse=False, timeenc=0, freq='15min', cols=None, train_only=False):\n\n if size == None:\n self.seq_len = 24 * 4 * 4\n self.label_len = 24 * 4\n self.pred_len = 24 * 4\n else:\n self.seq_len = size[0]\n self.label_len = size[1]\n self.pred_len = size[2]\n # init\n assert flag in ['pred']\n\n self.features = features\n self.target = target\n self.scale = scale\n self.inverse = inverse\n self.timeenc = timeenc\n self.freq = freq\n self.cols = cols\n self.root_path = root_path\n self.data_path = data_path\n self.__read_data__()\n\n def __read_data__(self):\n self.scaler = StandardScaler()\n df_raw = pd.read_csv(os.path.join(self.root_path,\n self.data_path))\n\n if self.cols:\n cols = self.cols.copy()\n else:\n cols = list(df_raw.columns)\n self.cols = cols.copy()\n cols.remove('date')\n if self.features == 'S':\n cols.remove(self.target)\n border1 = len(df_raw) - self.seq_len\n border2 = len(df_raw)\n\n if self.features == 'M' or self.features == 'MS':\n df_raw = df_raw[['date'] + cols]\n cols_data = df_raw.columns[1:]\n df_data = df_raw[cols_data]\n elif self.features == 'S':\n df_raw = df_raw[['date'] + cols + [self.target]]\n df_data = df_raw[[self.target]]\n\n if self.scale:\n self.scaler.fit(df_data.values)\n data = self.scaler.transform(df_data.values)\n else:\n data = df_data.values\n\n tmp_stamp = df_raw[['date']][border1:border2]\n tmp_stamp['date'] = pd.to_datetime(tmp_stamp.date)\n pred_dates = pd.date_range(tmp_stamp.date.values[-1], periods=self.pred_len + 1, freq=self.freq)\n\n df_stamp = pd.DataFrame(columns=['date'])\n df_stamp.date = list(tmp_stamp.date.values) + list(pred_dates[1:])\n self.future_dates = list(pred_dates[1:])\n if self.timeenc == 0:\n df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)\n df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)\n df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)\n df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)\n df_stamp['minute'] = df_stamp.date.apply(lambda row: row.minute, 1)\n df_stamp['minute'] = df_stamp.minute.map(lambda x: x // 15)\n data_stamp = df_stamp.drop(['date'], 1).values\n elif self.timeenc == 1:\n data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)\n data_stamp = data_stamp.transpose(1, 0)\n\n self.data_x = data[border1:border2]\n if self.inverse:\n self.data_y = df_data.values[border1:border2]\n else:\n self.data_y = data[border1:border2]\n self.data_stamp = data_stamp\n\n def __getitem__(self, index):\n s_begin = index\n s_end = s_begin + self.seq_len\n r_begin = s_end - self.label_len\n r_end = r_begin + self.label_len + self.pred_len\n\n seq_x = self.data_x[s_begin:s_end]\n if self.inverse:\n seq_y = self.data_x[r_begin:r_begin + self.label_len]\n else:\n seq_y = self.data_y[r_begin:r_begin + self.label_len]\n seq_x_mark = self.data_stamp[s_begin:s_end]\n seq_y_mark = self.data_stamp[r_begin:r_end]\n\n return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n def __len__(self):\n return len(self.data_x) - self.seq_len + 1\n\n def inverse_transform(self, data):\n return self.scaler.inverse_transform(data)"
},
{
"identifier": "Dataset_Custom_",
"path": "data_provider/data_loader.py",
"snippet": "class Dataset_Custom_(Dataset):\n def __init__(self, root_path, flag='train', size=None,\n features='S', data_path='ETTh1.csv',\n target='OT', scale=True, timeenc=0, freq='h', train_only=False):\n # size [seq_len, label_len, pred_len]\n # info\n if size == None:\n self.seq_len = 24 * 4 * 4\n self.label_len = 24 * 4\n self.pred_len = 24 * 4\n else:\n self.seq_len = size[0]\n self.label_len = size[1]\n self.pred_len = size[2]\n # init\n assert flag in ['train', 'test', 'val']\n type_map = {'train': 0, 'val': 1, 'test': 2}\n self.set_type = type_map[flag]\n\n self.features = features\n self.target = target\n self.scale = scale\n self.timeenc = timeenc\n self.freq = freq\n self.train_only = train_only\n\n self.root_path = root_path\n self.data_path = data_path\n self.__read_data__()\n\n def __read_data__(self):\n self.scaler = StandardScaler()\n df_raw = pd.read_csv(os.path.join(self.root_path,\n self.data_path))\n df_raw = df_raw.dropna()\n\n cols = list(df_raw.columns)\n if self.features == 'S':\n cols.remove(self.target)\n cols.remove('date')\n\n num_train = int(len(df_raw) * (0.7 if not self.train_only else 1))\n num_test = int(len(df_raw) * 0.1)\n num_vali = len(df_raw) - num_train - num_test\n border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len]\n border2s = [num_train, num_train + num_vali, len(df_raw)]\n border1 = border1s[self.set_type]\n border2 = border2s[self.set_type]\n\n if self.features == 'M' or self.features == 'MS':\n df_raw = df_raw[['date'] + cols]\n cols_data = df_raw.columns[1:]\n df_data = df_raw[cols_data]\n elif self.features == 'S':\n df_raw = df_raw[['date'] + cols + [self.target]]\n df_data = df_raw[[self.target]]\n\n ## min max scaler\n mms = MinMaxScaler(feature_range=(0, 1))\n if self.scale:\n train_data = df_data[border1s[0]:border2s[0]]\n mms.fit_transform(train_data.values)\n data = mms.fit_transform(df_data.values)\n else:\n data = df_data.values\n\n df_stamp = df_raw[['date']][border1:border2]\n df_stamp['date'] = pd.to_datetime(df_stamp.date)\n if self.timeenc == 0:\n df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)\n df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)\n df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)\n df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)\n data_stamp = df_stamp.drop(['date'], 1).values\n elif self.timeenc == 1:\n data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)\n data_stamp = data_stamp.transpose(1, 0)\n\n self.data_x = data[border1:border2]\n self.data_y = data[border1:border2]\n self.data_stamp = data_stamp\n\n def __getitem__(self, index):\n s_begin = index\n s_end = s_begin + self.seq_len\n r_begin = s_end - self.label_len\n r_end = r_begin + self.label_len + self.pred_len\n\n seq_x = self.data_x[s_begin:s_end]\n seq_y = self.data_y[r_begin:r_end]\n seq_x_mark = self.data_stamp[s_begin:s_end]\n seq_y_mark = self.data_stamp[r_begin:r_end]\n\n return seq_x, seq_y, seq_x_mark, seq_y_mark\n\n def __len__(self):\n return len(self.data_x) - self.seq_len - self.pred_len + 1\n\n def inverse_transform(self, data):\n mms = MinMaxScaler(feature_range=(0, 1))\n return mms.fit_transform(data.cpu())\n #return self.scaler.inverse_transform(data)"
}
] | from data_provider.data_loader import Dataset_Covid, Dataset_Custom, Dataset_Pred, Dataset_Custom_
from torch.utils.data import DataLoader | 4,480 |
data_dict = {
'ETTh1': Dataset_Custom_,#Dataset_ETT_hour,
'ETTm1': Dataset_Custom_,
'traffic': Dataset_Custom,
'electricity': Dataset_Custom_,
'exchange': Dataset_Custom_,
'weather': Dataset_Custom_,
|
data_dict = {
'ETTh1': Dataset_Custom_,#Dataset_ETT_hour,
'ETTm1': Dataset_Custom_,
'traffic': Dataset_Custom,
'electricity': Dataset_Custom_,
'exchange': Dataset_Custom_,
'weather': Dataset_Custom_, | 'covid': Dataset_Covid, | 0 | 2023-10-23 13:15:14+00:00 | 8k |
apple/ml-nvas3d | nvas3d/utils/training_data_generation/generate_test_data.py | [
{
"identifier": "load_room_grid",
"path": "soundspaces_nvas3d/utils/aihabitat_utils.py",
"snippet": "def load_room_grid(\n room: str,\n grid_distance: float\n) -> T.Dict:\n \"\"\"\n Load grid data for a specified room. If the grid data does not exist, it generates one.\n\n Args:\n - room: Name of the room.\n - grid_distance: The spacing between grid points.\n\n Returns:\n - A dictionary containing grid information for the specified room.\n \"\"\"\n\n grid_distance_str = str(grid_distance).replace(\".\", \"_\")\n dirname_grid = f'data/scene_datasets/metadata/mp3d/grid_{grid_distance_str}'\n filename_grid = f'{dirname_grid}/grid_{room}.npy'\n if not os.path.exists(filename_grid):\n os.makedirs(dirname_grid, exist_ok=True)\n print(f'Computing grid_{room}...')\n from soundspaces_nvas3d.rir_generation.generate_grid import save_xy_grid_points\n grid_info = save_xy_grid_points(room, grid_distance, dirname_grid)\n\n # load grid\n grid_info = np.load(filename_grid, allow_pickle=True).item()\n\n return grid_info"
},
{
"identifier": "wiener_deconv_list",
"path": "soundspaces_nvas3d/utils/audio_utils.py",
"snippet": "def wiener_deconv_list(\n signal: T.List[torch.Tensor],\n kernel: T.List[torch.Tensor],\n snr: float,\n is_cpu: bool = False\n) -> torch.Tensor:\n \"\"\"\n wiener_deconv for list input.\n\n Args:\n - signal (torch.Tensor): List of signals.\n - kernel (torch.Tensor): List of kernels.\n - snr (float): Signal-to-noise ratio.\n - is_cpu (bool, optional): Flag to determine if the operation should be on the CPU.\n\n Returns:\n - torch.Tensor: Deconvolved signal.\n \"\"\"\n\n M = len(signal)\n if isinstance(signal, list):\n signal = torch.stack(signal).reshape(M, -1)\n assert signal.shape[0] == M\n kernel = torch.stack(kernel).reshape(M, -1)\n snr /= abs(kernel).max()\n\n if is_cpu:\n signal = signal.detach().cpu()\n kernel = kernel.detach().cpu()\n\n n_batch, n_samples = signal.shape\n\n # Pad the signals and kernels to avoid circular convolution\n padded_signal = F.pad(signal, (0, kernel.shape[-1] - 1))\n padded_kernel = F.pad(kernel, (0, signal.shape[-1] - 1))\n\n # Compute the Fourier transforms\n signal_fr = torch.fft.rfft(padded_signal, dim=-1)\n kernel_fr = torch.fft.rfft(padded_kernel, dim=-1)\n\n # Compute the Wiener filter in the frequency domain\n wiener_filter_fr = torch.conj(kernel_fr) / (torch.abs(kernel_fr)**2 + 1 / snr)\n\n # Apply the Wiener filter\n filtered_signal_fr = wiener_filter_fr * signal_fr\n\n # Compute the inverse Fourier transform\n filtered_signal = torch.fft.irfft(filtered_signal_fr, dim=-1)\n\n # Crop the filtered signals to the original size\n filtered_signal = filtered_signal[:, :n_samples]\n\n filtered_signal_list = [filtered_signal[i] for i in range(filtered_signal.size(0))]\n\n return filtered_signal_list"
},
{
"identifier": "clip_two",
"path": "nvas3d/utils/audio_utils.py",
"snippet": "def clip_two(audio1, audio2):\n \"\"\"\n Clips two audio signals to the same length.\n\n Args:\n audio1: First audio signal.\n audio2: Second audio signal.\n\n Returns: \n - Two audio signals of the same length.\n \"\"\"\n\n length_diff = audio1.shape[-1] - audio2.shape[-1]\n\n if length_diff == 0:\n return audio1, audio2\n elif length_diff > 0:\n audio1 = audio1[..., :audio2.shape[-1]]\n elif length_diff < 0:\n audio2 = audio2[..., :audio1.shape[-1]]\n\n return audio1, audio2"
},
{
"identifier": "normalize",
"path": "nvas3d/utils/utils.py",
"snippet": "def normalize(audio, norm='peak'):\n if norm == 'peak':\n peak = abs(audio).max()\n if peak != 0:\n return audio / peak\n else:\n return audio\n elif norm == 'rms':\n if torch.is_tensor(audio):\n audio = audio.numpy()\n audio_without_padding = np.trim_zeros(audio, trim='b')\n rms = np.sqrt(np.mean(np.square(audio_without_padding))) * 100\n if rms != 0:\n return audio / rms\n else:\n return audio\n else:\n raise NotImplementedError"
},
{
"identifier": "parse_librispeech_metadata",
"path": "nvas3d/utils/utils.py",
"snippet": "def parse_librispeech_metadata(filename: str) -> T.Dict:\n \"\"\" \n Reads LibriSpeech metadata from a csv file and returns a dictionary.\n Each entry in the dictionary maps a reader_id (as integer) to its corresponding gender. \n \"\"\"\n\n import csv\n\n # Dictionary to store reader_id and corresponding gender\n librispeech_metadata = {}\n\n with open(filename, 'r') as file:\n reader = csv.reader(file, delimiter='|')\n for row in reader:\n # Skip comment lines and header\n if row[0].startswith(';') or row[0].strip() == 'ID':\n continue\n reader_id = int(row[0]) # Convert string to integer\n sex = row[1].strip() # Remove extra spaces\n librispeech_metadata[reader_id] = sex\n\n return librispeech_metadata"
},
{
"identifier": "MP3D_SCENE_SPLITS",
"path": "nvas3d/utils/utils.py",
"snippet": "MP3D_SCENE_SPLITS = {\n 'demo': ['17DRP5sb8fy'],\n}"
},
{
"identifier": "sample_speech",
"path": "nvas3d/utils/generate_dataset_utils.py",
"snippet": "def sample_speech(files_librispeech, librispeech_metadata):\n source_speech = torch.zeros(1) # Initialize with a tensor of zeros\n while torch.all(source_speech == 0) or source_speech.shape[-1] < MIN_LENGTH_AUDIO: # Continue until a non-zero tensor is found\n filename_source = random.choice(files_librispeech)\n speaker_id = int(filename_source.split('/')[6])\n speaker_gender = librispeech_metadata[speaker_id]\n if speaker_gender == 'M':\n source_class = 'male'\n else:\n source_class = 'female'\n source_speech, _ = torchaudio.load(filename_source)\n source_speech = source_speech.reshape(-1)\n\n return source_speech, source_class"
},
{
"identifier": "sample_nonspeech",
"path": "nvas3d/utils/generate_dataset_utils.py",
"snippet": "def sample_nonspeech(all_instruments_dir):\n class_dir = random.choice(all_instruments_dir)\n\n # Ensure that the class is not 'Speech'\n while 'Speech' in class_dir:\n class_dir = random.choice(all_instruments_dir)\n\n files_source = glob.glob(class_dir + '/**/*.flac', recursive=True)\n\n source_audio = torch.zeros(1) # Initialize with a tensor of zeros\n while torch.all(source_audio == 0) or source_audio.shape[-1] < MIN_LENGTH_AUDIO: # Continue until a non-zero tensor is found\n filename_source = random.choice(files_source)\n source_class = class_dir.split('/')[3]\n source_audio, _ = torchaudio.load(filename_source)\n source_audio = source_audio.reshape(-1)\n\n return source_audio, source_class"
},
{
"identifier": "sample_acoustic_guitar",
"path": "nvas3d/utils/generate_dataset_utils.py",
"snippet": "def sample_acoustic_guitar(all_instruments_dir):\n guitar_dir = [dirname for dirname in all_instruments_dir if dirname.split('/')[4] == 'Acoustic Guitar (steel)']\n\n class_dir = random.choice(guitar_dir)\n\n files_source = glob.glob(class_dir + '/**/*.flac', recursive=True)\n\n source_audio = torch.zeros(1) # Initialize with a tensor of zeros\n while torch.all(source_audio == 0) or source_audio.shape[-1] < MIN_LENGTH_AUDIO: # Continue until a non-zero tensor is found\n filename_source = random.choice(files_source)\n source_class = 'guitar'\n source_audio, _ = torchaudio.load(filename_source)\n source_audio = source_audio.reshape(-1)\n\n return source_audio, source_class"
},
{
"identifier": "sample_all",
"path": "nvas3d/utils/generate_dataset_utils.py",
"snippet": "def sample_all(all_instruments_dir, librispeech_metadata):\n class_dir = random.choice(all_instruments_dir)\n files_source = glob.glob(class_dir + '/**/*.flac', recursive=True)\n\n source_audio = torch.zeros(1) # Initialize with a tensor of zeros\n while torch.all(source_audio == 0) or source_audio.shape[-1] < MIN_LENGTH_AUDIO: # Continue until a non-zero tensor is found\n filename_source = random.choice(files_source)\n source_class = class_dir.split('/')[3]\n source_audio, _ = torchaudio.load(filename_source)\n source_audio = source_audio.reshape(-1)\n\n if source_class == 'Speech':\n speaker_id = int(filename_source.split('/')[6])\n speaker_gender = librispeech_metadata[speaker_id]\n if speaker_gender == 'M':\n source_class = 'male'\n else:\n source_class = 'female'\n\n return source_audio, source_class"
},
{
"identifier": "sample_instrument",
"path": "nvas3d/utils/generate_dataset_utils.py",
"snippet": "def sample_instrument(all_instruments_dir, librispeech_metadata, classname):\n guitar_dir = [dirname for dirname in all_instruments_dir if dirname.split('/')[3] == classname] # e.g., Guitar\n\n class_dir = random.choice(guitar_dir)\n\n files_source = glob.glob(class_dir + '/**/*.flac', recursive=True)\n\n source_audio = torch.zeros(1) # Initialize with a tensor of zeros\n while torch.all(source_audio == 0) or source_audio.shape[-1] < MIN_LENGTH_AUDIO: # Continue until a non-zero tensor is found\n filename_source = random.choice(files_source)\n source_class = 'guitar'\n source_audio, _ = torchaudio.load(filename_source)\n source_audio = source_audio.reshape(-1)\n\n return source_audio, source_class"
},
{
"identifier": "clip_source",
"path": "nvas3d/utils/generate_dataset_utils.py",
"snippet": "def clip_source(\n source1_audio: torch.Tensor,\n source2_audio: torch.Tensor,\n len_clip: int\n) -> T.Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Clip source audio tensors for faster convolution.\n\n Args:\n - source1_audio: First source audio tensor.\n - source2_audio: Second source audio tensor.\n - len_clip: Desired length of the output audio tensors.\n\n Returns:\n - Clipped source1_audio and source2_audio.\n \"\"\"\n\n # pad audio\n if len_clip > source1_audio.shape[0]:\n source1_audio = F.pad(source1_audio, (0, len_clip - source1_audio.shape[0]))\n source1_audio = F.pad(source1_audio, (0, max(0, len_clip - source1_audio.shape[0])))\n source2_audio = F.pad(source2_audio, (0, max(0, len_clip - source2_audio.shape[0])))\n\n # clip\n start_index = np.random.randint(0, source1_audio.shape[0] - len_clip) \\\n if source1_audio.shape[0] != len_clip else 0\n source1_audio_clipped = source1_audio[start_index: start_index + len_clip]\n source2_audio_clipped = source2_audio[start_index: start_index + len_clip]\n\n return source1_audio_clipped, source2_audio_clipped"
},
{
"identifier": "load_ir_source_receiver",
"path": "nvas3d/utils/generate_dataset_utils.py",
"snippet": "def load_ir_source_receiver(\n ir_dir: str,\n room: str,\n source_idx: int,\n receiver_idx_list: T.List[int],\n ir_length: int\n) -> T.List[torch.Tensor]:\n \"\"\"\n Load impulse responses for specific source and receivers in a room.\n\n Args:\n - ir_dir: Directory containing impulse response files.\n - room: Name of the room.\n - source_idx: Index of the source.\n - receiver_idx_list: List of receiver indices.\n - ir_length: Length of the impulse response to be loaded.\n\n Returns:\n - List of loaded impulse responses (first channel only).\n \"\"\"\n\n ir_list = []\n for receiver_idx in receiver_idx_list:\n filename_ir = f'{ir_dir}/{room}/ir_{room}_{source_idx}_{receiver_idx}.wav'\n ir, _ = torchaudio.load(filename_ir)\n if ir[0].shape[0] > ir_length:\n ir0 = ir[0][:ir_length]\n else:\n ir0 = F.pad(ir[0], (0, ir_length - ir[0].shape[0]))\n ir_list.append(ir0)\n\n return ir_list"
},
{
"identifier": "save_audio_list",
"path": "nvas3d/utils/generate_dataset_utils.py",
"snippet": "def save_audio_list(\n filename: str,\n audio_list: T.List[torch.Tensor],\n sample_rate: int,\n audio_format: str\n):\n \"\"\"\n Save a list of audio tensors to files.\n\n Args:\n - filename: Filename to save audio.\n - audio_list: List of audio tensors to save.\n - sample_rate: Sample rate of audio.\n - audio_format: File format to save audio.\n \"\"\"\n\n for idx_audio, audio in enumerate(audio_list):\n torchaudio.save(f'{filename}_{idx_audio+1}.{audio_format}', audio.unsqueeze(0), sample_rate)"
},
{
"identifier": "compute_reverb",
"path": "nvas3d/utils/generate_dataset_utils.py",
"snippet": "def compute_reverb(\n source_audio: torch.Tensor,\n ir_list: T.List[torch.Tensor],\n padding: str = 'valid'\n) -> T.List[torch.Tensor]:\n \"\"\"\n Compute reverberated audio signals by convolving source audio with impulse responses.\n\n Args:\n - source_audio: Source audio signal (dry) to be reverberated.\n - ir_list: List of impulse responses for reverberation.\n - padding: Padding mode for convolution ('valid' or 'full').\n\n Returns:\n - A list of reverberated audio signals.\n \"\"\"\n\n reverb_list = []\n for ir in ir_list:\n reverb = fftconvolve(source_audio, ir, padding)\n reverb_list.append(torch.from_numpy(reverb))\n\n return reverb_list"
}
] | import os
import glob
import json
import random
import subprocess
import concurrent.futures
import torch
import torchaudio
from itertools import product
from tqdm import tqdm
from soundspaces_nvas3d.utils.aihabitat_utils import load_room_grid
from soundspaces_nvas3d.utils.audio_utils import wiener_deconv_list
from nvas3d.utils.audio_utils import clip_two
from nvas3d.utils.utils import normalize, parse_librispeech_metadata, MP3D_SCENE_SPLITS
from nvas3d.utils.generate_dataset_utils import sample_speech, sample_nonspeech, sample_acoustic_guitar, sample_all, sample_instrument, clip_source, load_ir_source_receiver, save_audio_list, compute_reverb | 4,976 |
random.seed(42)
DATASET_NAME = f'nvas3d_square_{SOURCE1_DATA}_{SOURCE2_DATA}_queryall_{num_id_per_room}_v3'
os.makedirs(f'data/{DATASET_NAME}', exist_ok=True)
grid_distance = 1.0
grid_distance_str = str(grid_distance).replace(".", "_")
target_shape_t = 256
ir_length = 72000
ir_clip_idx = ir_length - 1
hop_length = 480
len_clip = hop_length * (target_shape_t - 1) + ir_length - 1
sample_rate = 48000
snr = 100
audio_format = 'flac'
for split in ['val']:
# LibriSpeech
if split == 'train':
librispeech_dir = f'data/MIDI/clip/Speech/LibriSpeech48k/train'
elif split == 'val':
librispeech_dir = f'data/MIDI/clip/Speech/LibriSpeech48k/validation'
elif split == 'test':
librispeech_dir = f'data/MIDI/clip/Speech/LibriSpeech48k/test'
else:
librispeech_dir = f'data/MIDI/clip/Speech/LibriSpeech48k/validation'
files_librispeech = glob.glob(librispeech_dir + '/**/*.flac', recursive=True)
librispeech_metadata = parse_librispeech_metadata(f'data/MIDI/clip/Speech/LibriSpeech48k/SPEAKERS.TXT')
# MIDI
if split == 'train':
all_instruments_dir = [path for path in glob.glob(os.path.join('data/MIDI/clip', '*/*', 'train')) if os.path.isdir(path)]
elif split == 'val':
all_instruments_dir = [path for path in glob.glob(os.path.join('data/MIDI/clip', '*/*', 'validation')) if os.path.isdir(path)]
elif split == 'test':
all_instruments_dir = [path for path in glob.glob(os.path.join('data/MIDI/clip', '*/*', 'test')) if os.path.isdir(path)]
else:
all_instruments_dir = [path for path in glob.glob(os.path.join('data/MIDI/clip', '*/*', 'validation')) if os.path.isdir(path)]
# RIR
if split == 'val_trainscene':
split_scene = 'train'
else:
split_scene = split
ir_dir = f'data/nvas3d_square/ir/{split_scene}/grid_{grid_distance_str}'
# Image
dirname_sourceimage = f'data/nvas3d_square/image/{split_scene}/grid_{grid_distance_str}'
# Iterate over rooms
for i_room, room in enumerate(tqdm(MP3D_SCENE_SPLITS[split_scene])):
grid_points = load_room_grid(room, grid_distance)['grid_points']
num_points = grid_points.shape[0]
total_pairs = []
filename = f'data/nvas3d_square/metadata/grid_{grid_distance_str}/{room}_square.json' # from generate_metadata_square.json
with open(filename, 'r') as file:
square_data = json.load(file)
pairs_all = square_data['selected_pairs']
# Add each pair with room id to the total list
random.shuffle(pairs_all)
# pairs = pairs[:num_id_per_room]
pairs = []
for pair in pairs_all:
source_idx_list, receiver_idx_list, novel_receiver_idx = pair
if (novel_receiver_idx not in source_idx_list) and (novel_receiver_idx not in receiver_idx_list):
pairs.append(pair)
# else:
# print(f'invalid idx: {source_idx_list}, {receiver_idx_list}, {novel_receiver_idx}')
if len(pairs) >= num_id_per_room:
break
# All IRs
# Initialize a list to store all combinations
all_combinations = []
# Iterate over selected pairs
for pair in pairs:
# Unpack the pair
_, receiver_idxs, _ = pair
# Get all combinations of source and receiver indices
comb = product(list(range(num_points)), receiver_idxs)
# Add these combinations to the list
all_combinations.extend(comb)
all_combinations = list(set(all_combinations)) # remove redundancy
# download wav files # Replace to render IR
# temp_list = set()
# with concurrent.futures.ThreadPoolExecutor() as executor:
# for source_idx in executor.map(download_wav, all_combinations):
# temp_list.add(source_idx)
# temp_list = list(temp_list)
# Render image
dirname_target_image = f'data/{DATASET_NAME}/{split}/{room}/image'
os.makedirs(dirname_target_image, exist_ok=True)
query_idx_list = list(range(num_points))
subprocess.run(['python', 'soundspaces_nvas3d/image_rendering/generate_target_image.py', '--room', room, '--dirname', dirname_target_image, '--source_idx_list', ' '.join(map(str, query_idx_list))])
# For each pair, make data
for i_pair, pair in enumerate(tqdm(pairs)):
dirname = f'data/{DATASET_NAME}/{split}/{room}/{i_pair}'
source_idx_list, receiver_idx_list, novel_receiver_idx = pair
os.makedirs(dirname, exist_ok=True)
# Compute source
os.makedirs(f'{dirname}/source', exist_ok=True)
if SOURCE1_DATA == 'speech':
source1_audio, source1_class = sample_speech(files_librispeech, librispeech_metadata)
elif SOURCE1_DATA == 'nonspeech':
source1_audio, source1_class = sample_nonspeech(all_instruments_dir)
elif SOURCE1_DATA == 'guitar':
source1_audio, source1_class = sample_acoustic_guitar(all_instruments_dir)
elif SOURCE1_DATA == 'all':
source1_audio, source1_class = sample_all(all_instruments_dir, librispeech_metadata)
else:
| #
# For licensing see accompanying LICENSE file.
# Copyright (C) 2023 Apple Inc. All Rights Reserved.
#
os.makedirs('data/temp', exist_ok=True)
SOURCE1_DATA = 'Guitar'
SOURCE2_DATA = 'Guitar'
num_id_per_room = 1
random.seed(42)
DATASET_NAME = f'nvas3d_square_{SOURCE1_DATA}_{SOURCE2_DATA}_queryall_{num_id_per_room}_v3'
os.makedirs(f'data/{DATASET_NAME}', exist_ok=True)
grid_distance = 1.0
grid_distance_str = str(grid_distance).replace(".", "_")
target_shape_t = 256
ir_length = 72000
ir_clip_idx = ir_length - 1
hop_length = 480
len_clip = hop_length * (target_shape_t - 1) + ir_length - 1
sample_rate = 48000
snr = 100
audio_format = 'flac'
for split in ['val']:
# LibriSpeech
if split == 'train':
librispeech_dir = f'data/MIDI/clip/Speech/LibriSpeech48k/train'
elif split == 'val':
librispeech_dir = f'data/MIDI/clip/Speech/LibriSpeech48k/validation'
elif split == 'test':
librispeech_dir = f'data/MIDI/clip/Speech/LibriSpeech48k/test'
else:
librispeech_dir = f'data/MIDI/clip/Speech/LibriSpeech48k/validation'
files_librispeech = glob.glob(librispeech_dir + '/**/*.flac', recursive=True)
librispeech_metadata = parse_librispeech_metadata(f'data/MIDI/clip/Speech/LibriSpeech48k/SPEAKERS.TXT')
# MIDI
if split == 'train':
all_instruments_dir = [path for path in glob.glob(os.path.join('data/MIDI/clip', '*/*', 'train')) if os.path.isdir(path)]
elif split == 'val':
all_instruments_dir = [path for path in glob.glob(os.path.join('data/MIDI/clip', '*/*', 'validation')) if os.path.isdir(path)]
elif split == 'test':
all_instruments_dir = [path for path in glob.glob(os.path.join('data/MIDI/clip', '*/*', 'test')) if os.path.isdir(path)]
else:
all_instruments_dir = [path for path in glob.glob(os.path.join('data/MIDI/clip', '*/*', 'validation')) if os.path.isdir(path)]
# RIR
if split == 'val_trainscene':
split_scene = 'train'
else:
split_scene = split
ir_dir = f'data/nvas3d_square/ir/{split_scene}/grid_{grid_distance_str}'
# Image
dirname_sourceimage = f'data/nvas3d_square/image/{split_scene}/grid_{grid_distance_str}'
# Iterate over rooms
for i_room, room in enumerate(tqdm(MP3D_SCENE_SPLITS[split_scene])):
grid_points = load_room_grid(room, grid_distance)['grid_points']
num_points = grid_points.shape[0]
total_pairs = []
filename = f'data/nvas3d_square/metadata/grid_{grid_distance_str}/{room}_square.json' # from generate_metadata_square.json
with open(filename, 'r') as file:
square_data = json.load(file)
pairs_all = square_data['selected_pairs']
# Add each pair with room id to the total list
random.shuffle(pairs_all)
# pairs = pairs[:num_id_per_room]
pairs = []
for pair in pairs_all:
source_idx_list, receiver_idx_list, novel_receiver_idx = pair
if (novel_receiver_idx not in source_idx_list) and (novel_receiver_idx not in receiver_idx_list):
pairs.append(pair)
# else:
# print(f'invalid idx: {source_idx_list}, {receiver_idx_list}, {novel_receiver_idx}')
if len(pairs) >= num_id_per_room:
break
# All IRs
# Initialize a list to store all combinations
all_combinations = []
# Iterate over selected pairs
for pair in pairs:
# Unpack the pair
_, receiver_idxs, _ = pair
# Get all combinations of source and receiver indices
comb = product(list(range(num_points)), receiver_idxs)
# Add these combinations to the list
all_combinations.extend(comb)
all_combinations = list(set(all_combinations)) # remove redundancy
# download wav files # Replace to render IR
# temp_list = set()
# with concurrent.futures.ThreadPoolExecutor() as executor:
# for source_idx in executor.map(download_wav, all_combinations):
# temp_list.add(source_idx)
# temp_list = list(temp_list)
# Render image
dirname_target_image = f'data/{DATASET_NAME}/{split}/{room}/image'
os.makedirs(dirname_target_image, exist_ok=True)
query_idx_list = list(range(num_points))
subprocess.run(['python', 'soundspaces_nvas3d/image_rendering/generate_target_image.py', '--room', room, '--dirname', dirname_target_image, '--source_idx_list', ' '.join(map(str, query_idx_list))])
# For each pair, make data
for i_pair, pair in enumerate(tqdm(pairs)):
dirname = f'data/{DATASET_NAME}/{split}/{room}/{i_pair}'
source_idx_list, receiver_idx_list, novel_receiver_idx = pair
os.makedirs(dirname, exist_ok=True)
# Compute source
os.makedirs(f'{dirname}/source', exist_ok=True)
if SOURCE1_DATA == 'speech':
source1_audio, source1_class = sample_speech(files_librispeech, librispeech_metadata)
elif SOURCE1_DATA == 'nonspeech':
source1_audio, source1_class = sample_nonspeech(all_instruments_dir)
elif SOURCE1_DATA == 'guitar':
source1_audio, source1_class = sample_acoustic_guitar(all_instruments_dir)
elif SOURCE1_DATA == 'all':
source1_audio, source1_class = sample_all(all_instruments_dir, librispeech_metadata)
else: | source1_audio, source1_class = sample_instrument(all_instruments_dir, librispeech_metadata, SOURCE1_DATA) | 10 | 2023-10-19 05:35:54+00:00 | 8k |
virevolai/logos-shift-client | logos_shift_client/logos_shift.py | [
{
"identifier": "BohitaClient",
"path": "logos_shift_client/bohita.py",
"snippet": "class BohitaClient:\n def __init__(self, api_key: str):\n if api_key is None:\n logging.warning(\n \"No API KEY provided. No data will be sent to Bohita and automatic routing will not happen\"\n )\n self.headers = None\n else:\n self.headers = {\n \"Content-Type\": \"application/json\",\n \"Bohita-Auth\": f\"Bearer {api_key}\",\n }\n self.async_client = httpx.AsyncClient(headers=self.headers, timeout=TIMEOUT)\n\n def post_instrumentation_data(self, data, dataset):\n if not self.headers:\n return\n try:\n response = requests.post(\n f\"{BASE_URL}/instrumentation/\",\n headers=self.headers,\n json={**data, \"dataset\": dataset},\n timeout=TIMEOUT,\n )\n response.raise_for_status()\n except requests.RequestException as e:\n logger.error(\"Failed to post instrumentation data: %s\", str(e))\n\n async def post_instrumentation_data_async(self, data, dataset):\n if not self.headers:\n return\n try:\n response = await self.async_client.post(\n f\"{BASE_URL}/instrumentation/\", json={**data, \"dataset\": dataset}\n )\n response.raise_for_status()\n except httpx.RequestError as e:\n logger.error(\"Failed to post instrumentation data: %s\", str(e))\n\n def get_config(self):\n if not self.headers:\n return {}\n try:\n response = requests.get(\n f\"{BASE_URL}/config\", headers=self.headers, timeout=TIMEOUT\n )\n response.raise_for_status()\n return response.json()\n except requests.RequestException as e:\n logger.error(\"Failed to get configuration: %s\", str(e))\n return {}\n\n async def get_config_async(self):\n if not self.headers:\n return {}\n try:\n response = await self.async_client.get(f\"{BASE_URL}/config\")\n response.raise_for_status()\n return response.json()\n except httpx.RequestError as e:\n logger.error(\"Failed to get configuration: %s\", str(e))\n return {}\n\n def predict(self, **kwargs):\n if not self.headers:\n return\n try:\n response = requests.post(\n f\"{BASE_URL}/predict\",\n headers=self.headers,\n json=kwargs,\n timeout=TIMEOUT,\n )\n response.raise_for_status()\n return response.json()\n except requests.RequestException as e:\n logger.error(\"Failed to make prediction: %s\", str(e))\n\n async def predict_async(self, **kwargs):\n if not self.headers:\n return\n try:\n response = await self.async_client.post(f\"{BASE_URL}/predict\", json=kwargs)\n response.raise_for_status()\n return response.json()\n except httpx.RequestError as e:\n logger.error(\"Failed to make prediction: %s\", str(e))"
},
{
"identifier": "APIRouter",
"path": "logos_shift_client/router.py",
"snippet": "class APIRouter:\n \"\"\"\n APIRouter is responsible for routing API calls based on the provided configuration.\n\n It supports three modes:\n - \"never\": Always use the old API.\n - \"random\": Randomly choose between the old and new API based on a threshold.\n - \"user_based\": Decide based on a hash of the user ID.\n\n Attributes:\n bohita_client (BohitaClient): The client used to communicate with the Bohita platform.\n threshold (float): The percentage of requests to route to the new API. Default is 0.1 (10%).\n mode (str): The routing mode. Can be \"never\", \"random\", or \"user_based\". Default is \"never\".\n call_count (int): The number of API calls made.\n conf_frequency (int): How frequently to fetch configuration updates from the server.\n\n Examples:\n >>> router = APIRouter(bohita_client, threshold=0.2, mode=\"random\")\n >>> api_to_call = router.get_api_to_call(old_api_func)\n \"\"\"\n\n def __init__(self, bohita_client=None, threshold=0.1, mode=\"never\"):\n \"\"\"\n Initializes a new instance of APIRouter.\n\n Args:\n bohita_client (Optional[BohitaClient]): An instance of BohitaClient used to communicate with the Bohita platform.\n threshold (float): The percentage of requests to route to the new API. Default is 0.1 (10%).\n mode (str): The routing mode. Can be \"never\", \"random\", or \"user_based\". Default is \"never\".\n \"\"\"\n self.bohita_client = bohita_client\n if not 0 <= threshold <= 1:\n raise ValueError(\"Threshold must be between 0 and 1\")\n self.threshold = threshold # precentage of requests to new API\n self.mode = mode # \"never\", \"random\" or \"user_based\"\n self.call_count, self.conf_frequency = (\n 0,\n 1_000,\n ) # How frequently to fetch config\n logger.info(f\"Initialized {mode} router\")\n self._get_configuration()\n\n async def _get_configuration_common(self, is_async):\n \"\"\"\n Fetches the routing configuration from the Bohita platform and updates the router's settings.\n\n This method is called periodically based on the conf_frequency setting.\n \"\"\"\n try:\n logger.info(\"Checking for config updates\")\n if is_async:\n config = await self.bohita_client.get_config_async()\n else:\n config = self.bohita_client.get_config()\n self.threshold = config.get(\"threshold\", self.threshold)\n self.mode = config.get(\"mode\", self.mode)\n self.conf_frequency = config.get(\"frequency\", self.conf_frequency)\n logger.info(\"Configuration updated successfully\")\n except Exception as e:\n logger.warning(\"Could not get configuration from server: %s\", str(e))\n logger.warning(\"If the problem persists, this instance might be stale\")\n\n def _get_configuration(self):\n asyncio.run(self._get_configuration_common(False))\n\n async def _get_configuration_async(self):\n await self._get_configuration_common(True)\n\n def _get_user_hash(self, user_id):\n return int(hashlib.md5(str(user_id).encode()).hexdigest(), 16)\n\n def should_route_to_new_api(self, user_id=None):\n \"\"\"\n Determines whether the next API call should be routed to the new API based on the current mode and threshold.\n\n Args:\n user_id (Optional[str]): The user ID for user-based routing. Required if mode is \"user_based\".\n\n Returns:\n bool: True if the call should be routed to the new API, False otherwise.\n \"\"\"\n if self.mode == \"random\":\n return random.random() < self.threshold\n elif self.mode == \"user_based\":\n if user_id:\n return self._get_user_hash(user_id) % 100 < self.threshold * 100\n return False\n\n def get_api_to_call(self, old_api_func, user_id=None):\n \"\"\"\n Determines which API function to call based on the routing configuration.\n\n Args:\n old_api_func (callable): The old API function.\n user_id (Optional[str]): The user ID for user-based routing.\n\n Returns:\n callable: The API function to call.\n \"\"\"\n self.call_count += 1\n if self.call_count % self.conf_frequency == 0:\n self._get_configuration()\n if self.should_route_to_new_api(user_id):\n return self.call_new_api\n return old_api_func\n\n async def get_api_to_call_async(self, old_api_func, user_id=None):\n \"\"\"\n Determines which API function to call based on the routing configuration.\n\n Args:\n old_api_func (callable): The old API function.\n user_id (Optional[str]): The user ID for user-based routing.\n\n Returns:\n callable: The API function to call.\n \"\"\"\n self.call_count += 1\n if self.call_count % self.conf_frequency == 0:\n await self._get_configuration_async()\n if self.should_route_to_new_api(user_id):\n return self.call_new_api_async\n return old_api_func\n\n async def call_new_api_async(self, **kwargs):\n await self.bohita_client.predict_async(**kwargs)\n\n def call_new_api(self, **kwargs):\n self.bohita_client.predict(**kwargs)"
}
] | import asyncio
import logging
import threading
import time
import uuid
from pathlib import Path
from collections import deque
from typing import Optional, Union
from tenacity import retry, wait_fixed
from .bohita import BohitaClient
from .router import APIRouter | 3,642 | self.file_handle.write(str(data) + "\n")
except Exception as e:
logger.error(
"Could not save to local file. This might happen because local file format is simple. Local does str(data)"
)
logger.exception(e)
@retry(wait=wait_fixed(3))
def send_data(self, data, dataset="default"):
logger.info(f"BufferManager: Sending data to dataset {dataset}. Data: {data}")
self.bohita_client.post_instrumentation_data(data, dataset)
self._write_to_local(data)
def send_data_from_buffers(self):
while True:
time.sleep(self.check_seconds)
for buffer in self.buffers:
with buffer["lock"]:
if buffer["data"]:
data_to_send = list(buffer["data"])
buffer["data"].clear()
for item in data_to_send:
logger.debug(f"Sending {item}")
self.send_data(item, dataset=item["dataset"])
def register_buffer(self, buffer, lock):
self.buffers.append({"data": buffer, "lock": lock})
class LogosShift:
"""
LogosShift is a tool for capturing, logging, and optionally sending function call data to a remote server using rollouts.
It allows developers to easily instrument their functions, capturing input arguments, output results, metadata, and optionally sending this data to the Bohita platform for further analysis. Data can also be stored locally.
It supports both synchronous and asynchronous functions. For asynchronous functions, it automatically detects and wraps them accordingly.
Attributes:
bohita_client (BohitaClient): The client used to send data to the Bohita platform.
max_entries (int): The maximum number of entries to store in a buffer before switching to the next buffer.
buffer_A (collections.deque): The first data buffer.
buffer_B (collections.deque): The second data buffer.
active_buffer (collections.deque): The currently active data buffer.
lock (threading.Lock): A lock to ensure thread-safety when modifying the buffers.
buffer_manager (BufferManager): The manager for handling data buffers and sending data.
router (APIRouter): The router for determining which API to call based on the function and user.
Examples:
>>> logos_shift = LogosShift(api_key="YOUR_API_KEY")
>>> @logos_shift()
... def add(x, y):
... return x + y
...
>>> result = add(1, 2)
Asynchronous function:
>>> @logos_shift()
... async def add_async(x, y):
... return x + y
...
>>> result = await add_async(1, 2)
To provide feedback:
>>> logos_shift.provide_feedback(result['bohita_logos_shift_id'], "success")
To specify a dataset:
>>> @logos_shift(dataset="sales")
... def add_sales(x, y):
... return x + y
Using metadata:
>>> @logos_shift()
... def multiply(x, y, logos_shift_metadata={"user_id": "12345"}):
... return x * y
To store data locally:
>>> logos_shift = LogosShift(api_key="YOUR_API_KEY", filename="api_calls.log")
To disable sending data to Bohita:
>>> logos_shift = LogosShift(api_key=None, filename="api_calls.log")
"""
def __init__(
self,
api_key,
bohita_client=None,
router=None,
max_entries=MAX_ENTRIES,
check_seconds=CHECK_SECONDS,
filename=None,
):
"""
Initializes a new instance of LogosShift.
Args:
api_key (str): Your API key for the Bohita platform.
bohita_client (Optional[BohitaClient]): An optional instance of BohitaClient. If not provided, a new instance will be created.
router (Optional[APIRouter]): An optional instance of APIRouter. If not provided, a new instance will be created.
max_entries (int): The maximum number of entries to store in a buffer before switching to the next buffer. Default is 10.
check_seconds (int): The interval in seconds between checks to send data from the buffers. Default is 5.
filename (Optional[Union[str, Path]]): The file path for local data storage. If None, data is not stored locally.
Examples:
>>> logos_shift = LogosShift(api_key="YOUR_API_KEY")
>>> logos_shift = LogosShift(api_key="YOUR_API_KEY", filename="api_calls.log")
"""
self.max_entries = max_entries
self.bohita_client = (
bohita_client if bohita_client else BohitaClient(api_key=api_key)
)
self.buffer_A, self.buffer_B = deque(), deque()
self.active_buffer = self.buffer_A
self.lock = threading.Lock()
self.buffer_manager = BufferManager(
bohita_client=self.bohita_client,
check_seconds=check_seconds,
filename=filename,
)
self.buffer_manager.register_buffer(self.buffer_A, self.lock)
self.buffer_manager.register_buffer(self.buffer_B, self.lock)
|
logger = logging.getLogger(__name__)
MAX_ENTRIES = 10
CHECK_SECONDS = 5
class SingletonMeta(type):
_instances = {}
_lock = threading.Lock()
def __call__(cls, *args, **kwargs):
with cls._lock:
if cls not in cls._instances:
instance = super().__call__(*args, **kwargs)
cls._instances[cls] = instance
return cls._instances[cls]
class BufferManager(metaclass=SingletonMeta):
"""
A singleton class responsible for managing data buffers and sending data to a remote server.
Attributes:
bohita_client: An instance of BohitaClient used to send data to the remote server.
check_seconds: The interval in seconds between checks to send data from the buffers.
filepath: The file path for local data storage. If None, data is not stored locally.
buffers: A list of data buffers.
thread: The thread responsible for sending data from the buffers.
"""
_instance = None
lock = threading.Lock()
def __init__(
self,
bohita_client: BohitaClient,
check_seconds: int = CHECK_SECONDS,
filename: Optional[Union[str, Path]] = None,
):
self.bohita_client = bohita_client
self.check_seconds = check_seconds
self.open_handle(filename)
self.buffers = []
self.thread = threading.Thread(target=self.send_data_from_buffers, daemon=True)
self.thread.start()
logger.info("BufferManager: Initialized and sending thread started.")
def open_handle(self, filename: str):
if filename:
filepath = Path(filename)
logdir = filepath.parent
if not logdir.exists():
raise Exception(f"Directory {logdir} does not exist!")
self.file_handle = open(filepath, "a", buffering=1)
logger.debug(f"Buffered file handler opened for local file {filename}")
else:
self.file_handle = None
def __del__(self):
if self.file_handle:
self.file_handle.close()
logger.debug("Buffered file handle closed")
def _write_to_local(self, data):
try:
if self.file_handle:
self.file_handle.write(str(data) + "\n")
except Exception as e:
logger.error(
"Could not save to local file. This might happen because local file format is simple. Local does str(data)"
)
logger.exception(e)
@retry(wait=wait_fixed(3))
def send_data(self, data, dataset="default"):
logger.info(f"BufferManager: Sending data to dataset {dataset}. Data: {data}")
self.bohita_client.post_instrumentation_data(data, dataset)
self._write_to_local(data)
def send_data_from_buffers(self):
while True:
time.sleep(self.check_seconds)
for buffer in self.buffers:
with buffer["lock"]:
if buffer["data"]:
data_to_send = list(buffer["data"])
buffer["data"].clear()
for item in data_to_send:
logger.debug(f"Sending {item}")
self.send_data(item, dataset=item["dataset"])
def register_buffer(self, buffer, lock):
self.buffers.append({"data": buffer, "lock": lock})
class LogosShift:
"""
LogosShift is a tool for capturing, logging, and optionally sending function call data to a remote server using rollouts.
It allows developers to easily instrument their functions, capturing input arguments, output results, metadata, and optionally sending this data to the Bohita platform for further analysis. Data can also be stored locally.
It supports both synchronous and asynchronous functions. For asynchronous functions, it automatically detects and wraps them accordingly.
Attributes:
bohita_client (BohitaClient): The client used to send data to the Bohita platform.
max_entries (int): The maximum number of entries to store in a buffer before switching to the next buffer.
buffer_A (collections.deque): The first data buffer.
buffer_B (collections.deque): The second data buffer.
active_buffer (collections.deque): The currently active data buffer.
lock (threading.Lock): A lock to ensure thread-safety when modifying the buffers.
buffer_manager (BufferManager): The manager for handling data buffers and sending data.
router (APIRouter): The router for determining which API to call based on the function and user.
Examples:
>>> logos_shift = LogosShift(api_key="YOUR_API_KEY")
>>> @logos_shift()
... def add(x, y):
... return x + y
...
>>> result = add(1, 2)
Asynchronous function:
>>> @logos_shift()
... async def add_async(x, y):
... return x + y
...
>>> result = await add_async(1, 2)
To provide feedback:
>>> logos_shift.provide_feedback(result['bohita_logos_shift_id'], "success")
To specify a dataset:
>>> @logos_shift(dataset="sales")
... def add_sales(x, y):
... return x + y
Using metadata:
>>> @logos_shift()
... def multiply(x, y, logos_shift_metadata={"user_id": "12345"}):
... return x * y
To store data locally:
>>> logos_shift = LogosShift(api_key="YOUR_API_KEY", filename="api_calls.log")
To disable sending data to Bohita:
>>> logos_shift = LogosShift(api_key=None, filename="api_calls.log")
"""
def __init__(
self,
api_key,
bohita_client=None,
router=None,
max_entries=MAX_ENTRIES,
check_seconds=CHECK_SECONDS,
filename=None,
):
"""
Initializes a new instance of LogosShift.
Args:
api_key (str): Your API key for the Bohita platform.
bohita_client (Optional[BohitaClient]): An optional instance of BohitaClient. If not provided, a new instance will be created.
router (Optional[APIRouter]): An optional instance of APIRouter. If not provided, a new instance will be created.
max_entries (int): The maximum number of entries to store in a buffer before switching to the next buffer. Default is 10.
check_seconds (int): The interval in seconds between checks to send data from the buffers. Default is 5.
filename (Optional[Union[str, Path]]): The file path for local data storage. If None, data is not stored locally.
Examples:
>>> logos_shift = LogosShift(api_key="YOUR_API_KEY")
>>> logos_shift = LogosShift(api_key="YOUR_API_KEY", filename="api_calls.log")
"""
self.max_entries = max_entries
self.bohita_client = (
bohita_client if bohita_client else BohitaClient(api_key=api_key)
)
self.buffer_A, self.buffer_B = deque(), deque()
self.active_buffer = self.buffer_A
self.lock = threading.Lock()
self.buffer_manager = BufferManager(
bohita_client=self.bohita_client,
check_seconds=check_seconds,
filename=filename,
)
self.buffer_manager.register_buffer(self.buffer_A, self.lock)
self.buffer_manager.register_buffer(self.buffer_B, self.lock) | self.router = router if router else APIRouter(bohita_client=self.bohita_client) | 1 | 2023-10-20 00:00:38+00:00 | 8k |
kwonathan/language-models-trajectory-generators | env.py | [
{
"identifier": "Robot",
"path": "robot.py",
"snippet": "class Robot:\n\n def __init__(self, args):\n\n if args.robot == \"sawyer\":\n self.base_start_position = config.base_start_position_sawyer\n self.base_start_orientation_q = p.getQuaternionFromEuler(config.base_start_orientation_e_sawyer)\n self.joint_start_positions = config.joint_start_positions_sawyer\n self.id = p.loadURDF(\"sawyer_robot/sawyer_description/urdf/sawyer.urdf\", self.base_start_position, self.base_start_orientation_q, useFixedBase=True)\n self.robot = \"sawyer\"\n self.ee_index = config.ee_index_sawyer\n elif args.robot == \"franka\":\n self.base_start_position = config.base_start_position_franka\n self.base_start_orientation_q = p.getQuaternionFromEuler(config.base_start_orientation_e_franka)\n self.joint_start_positions = config.joint_start_positions_franka\n self.id = p.loadURDF(\"franka_robot/panda.urdf\", self.base_start_position, self.base_start_orientation_q, useFixedBase=True)\n self.robot = \"franka\"\n self.ee_index = config.ee_index_franka\n self.ee_start_position = config.ee_start_position\n self.ee_start_orientation_e = config.ee_start_orientation_e\n self.ee_current_position = config.ee_start_position\n self.ee_current_orientation_e = config.ee_start_orientation_e\n\n self.gripper_open = True\n self.trajectory_step = 1\n\n i = 0\n for j in range(p.getNumJoints(self.id)):\n joint_type = p.getJointInfo(self.id, j)[2]\n if joint_type == p.JOINT_PRISMATIC or joint_type == p.JOINT_REVOLUTE:\n p.resetJointState(self.id, j, self.joint_start_positions[i])\n i += 1\n\n\n\n def move(self, env, ee_target_position, ee_target_orientation_e, gripper_open, is_trajectory):\n\n if self.robot == \"sawyer\":\n gripper1_index = None\n gripper2_index = None\n gripper_target_position = config.gripper_goal_position_open_sawyer if gripper_open else config.gripper_goal_position_closed_sawyer\n elif self.robot == \"franka\":\n gripper1_index = 9\n gripper2_index = 10\n gripper_target_position = config.gripper_goal_position_open_franka if gripper_open else config.gripper_goal_position_closed_franka\n\n min_joint_positions = [p.getJointInfo(self.id, i)[8] for i in range(p.getNumJoints(self.id)) if p.getJointInfo(self.id, i)[2] == p.JOINT_PRISMATIC or p.getJointInfo(self.id, i)[2] == p.JOINT_REVOLUTE]\n max_joint_positions = [p.getJointInfo(self.id, i)[9] for i in range(p.getNumJoints(self.id)) if p.getJointInfo(self.id, i)[2] == p.JOINT_PRISMATIC or p.getJointInfo(self.id, i)[2] == p.JOINT_REVOLUTE]\n joint_ranges = [abs(max_joint_position - min_joint_position) for min_joint_position, max_joint_position in zip(min_joint_positions, max_joint_positions)]\n rest_poses = list((np.array(max_joint_positions) + np.array(min_joint_positions)) / 2)\n\n ee_target_orientation_q = p.getQuaternionFromEuler(ee_target_orientation_e)\n\n ee_current_position = p.getLinkState(self.id, self.ee_index, computeForwardKinematics=True)[0]\n ee_current_orientation_q = p.getLinkState(self.id, self.ee_index, computeForwardKinematics=True)[1]\n ee_current_orientation_e = p.getEulerFromQuaternion(ee_current_orientation_q)\n gripper1_current_position = p.getJointState(self.id, gripper1_index)[0]\n gripper2_current_position = p.getJointState(self.id, gripper2_index)[0]\n\n time_step = 0\n\n while (not (ee_current_position[0] <= ee_target_position[0] + config.margin_error and ee_current_position[0] >= ee_target_position[0] - config.margin_error and\n ee_current_position[1] <= ee_target_position[1] + config.margin_error and ee_current_position[1] >= ee_target_position[1] - config.margin_error and\n ee_current_position[2] <= ee_target_position[2] + config.margin_error and ee_current_position[2] >= ee_target_position[2] - config.margin_error and\n ee_current_orientation_e[0] <= ee_target_orientation_e[0] + config.margin_error and ee_current_orientation_e[0] >= ee_target_orientation_e[0] - config.margin_error and\n ee_current_orientation_e[1] <= ee_target_orientation_e[1] + config.margin_error and ee_current_orientation_e[1] >= ee_target_orientation_e[1] - config.margin_error and\n ee_current_orientation_e[2] <= ee_target_orientation_e[2] + config.margin_error and ee_current_orientation_e[2] >= ee_target_orientation_e[2] - config.margin_error and\n gripper1_current_position <= gripper_target_position + config.gripper_margin_error and gripper1_current_position >= gripper_target_position - config.gripper_margin_error and\n gripper2_current_position <= gripper_target_position + config.gripper_margin_error and gripper2_current_position >= gripper_target_position - config.gripper_margin_error)):\n\n target_joint_positions = p.calculateInverseKinematics(self.id, self.ee_index, ee_target_position, targetOrientation=ee_target_orientation_q, lowerLimits=min_joint_positions, upperLimits=max_joint_positions, jointRanges=joint_ranges, restPoses=rest_poses, maxNumIterations=500)\n\n if self.robot == \"sawyer\":\n pass\n elif self.robot == \"franka\":\n p.setJointMotorControlArray(self.id, range(7), p.POSITION_CONTROL, targetPositions=target_joint_positions[:-2], forces=[config.arm_movement_force_franka] * 7)\n p.setJointMotorControl2(self.id, gripper1_index, p.POSITION_CONTROL, targetPosition=gripper_target_position, force=config.gripper_movement_force_franka)\n p.setJointMotorControl2(self.id, gripper2_index, p.POSITION_CONTROL, targetPosition=gripper_target_position, force=config.gripper_movement_force_franka)\n\n env.update()\n self.get_camera_image(\"head\", env, save_camera_image=is_trajectory, rgb_image_path=config.rgb_image_trajectory_path.format(step=self.trajectory_step), depth_image_path=config.depth_image_trajectory_path.format(step=self.trajectory_step))\n if is_trajectory:\n self.trajectory_step += 1\n\n ee_current_position = p.getLinkState(self.id, self.ee_index, computeForwardKinematics=True)[0]\n ee_current_orientation_q = p.getLinkState(self.id, self.ee_index, computeForwardKinematics=True)[1]\n ee_current_orientation_e = p.getEulerFromQuaternion(ee_current_orientation_q)\n gripper1_new_position = p.getJointState(self.id, gripper1_index)[0]\n gripper2_new_position = p.getJointState(self.id, gripper2_index)[0]\n\n self.ee_current_position = ee_current_position\n self.ee_current_orientation_e = ee_current_orientation_e\n self.gripper_open = gripper_open\n\n if ((ee_current_position[0] <= ee_target_position[0] + config.margin_error and ee_current_position[0] >= ee_target_position[0] - config.margin_error and\n ee_current_position[1] <= ee_target_position[1] + config.margin_error and ee_current_position[1] >= ee_target_position[1] - config.margin_error and\n ee_current_position[2] <= ee_target_position[2] + config.margin_error and ee_current_position[2] >= ee_target_position[2] - config.margin_error and\n ee_current_orientation_e[0] <= ee_target_orientation_e[0] + config.margin_error and ee_current_orientation_e[0] >= ee_target_orientation_e[0] - config.margin_error and\n ee_current_orientation_e[1] <= ee_target_orientation_e[1] + config.margin_error and ee_current_orientation_e[1] >= ee_target_orientation_e[1] - config.margin_error and\n ee_current_orientation_e[2] <= ee_target_orientation_e[2] + config.margin_error and ee_current_orientation_e[2] >= ee_target_orientation_e[2] - config.margin_error) and\n (not gripper_open) and\n math.isclose(gripper1_new_position, gripper1_current_position, rel_tol=config.rel_tol, abs_tol=config.abs_tol) and\n math.isclose(gripper2_new_position, gripper2_current_position, rel_tol=config.rel_tol, abs_tol=config.abs_tol)):\n break\n\n gripper1_current_position = gripper1_new_position\n gripper2_current_position = gripper2_new_position\n\n time_step += 1\n\n if is_trajectory:\n if time_step > 0:\n break\n else:\n if time_step > 99:\n break\n\n\n\n def get_camera_image(self, camera, env, save_camera_image, rgb_image_path, depth_image_path):\n\n if camera == \"wrist\":\n camera_position = p.getLinkState(self.id, self.ee_index, computeForwardKinematics=True)[0]\n camera_orientation_q = p.getLinkState(self.id, self.ee_index, computeForwardKinematics=True)[1]\n elif camera == \"head\":\n camera_position = config.head_camera_position\n camera_orientation_q = p.getQuaternionFromEuler(config.head_camera_orientation_e)\n\n projection_matrix = p.computeProjectionMatrixFOV(fov, aspect, near_plane, far_plane)\n rotation_matrix = np.array(p.getMatrixFromQuaternion(camera_orientation_q)).reshape(3, 3)\n\n if camera == \"wrist\":\n init_camera_vector = [0, 0, 1]\n init_up_vector = [1, 0, 0]\n elif camera == \"head\":\n init_camera_vector = [0, 0, 1]\n init_up_vector = [-1, 0, 0]\n\n camera_vector = rotation_matrix.dot(init_camera_vector)\n up_vector = rotation_matrix.dot(init_up_vector)\n view_matrix = p.computeViewMatrix(camera_position, camera_position + camera_vector, up_vector)\n\n image = p.getCameraImage(config.image_width, config.image_height, viewMatrix=view_matrix, projectionMatrix=projection_matrix, renderer=p.ER_BULLET_HARDWARE_OPENGL)\n\n rgb_buffer = image[2]\n depth_buffer = image[3]\n\n if save_camera_image:\n rgb_image = Image.fromarray(rgb_buffer)\n rgb_image.save(rgb_image_path)\n\n n = config.near_plane\n f = config.far_plane\n depth_array = 2 * n * f / (f + n - (2 * depth_buffer - 1.0) * (f - n))\n\n save_image(torch.Tensor(depth_array), depth_image_path)\n\n return camera_position, camera_orientation_q"
},
{
"identifier": "OK",
"path": "config.py",
"snippet": "OK = \"\\033[92m\""
},
{
"identifier": "PROGRESS",
"path": "config.py",
"snippet": "PROGRESS = \"\\033[93m\""
},
{
"identifier": "FAIL",
"path": "config.py",
"snippet": "FAIL = \"\\033[91m\""
},
{
"identifier": "ENDC",
"path": "config.py",
"snippet": "ENDC = \"\\033[0m\""
},
{
"identifier": "CAPTURE_IMAGES",
"path": "config.py",
"snippet": "CAPTURE_IMAGES = 1"
},
{
"identifier": "ADD_BOUNDING_CUBES",
"path": "config.py",
"snippet": "ADD_BOUNDING_CUBES = 2"
},
{
"identifier": "ADD_TRAJECTORY_POINTS",
"path": "config.py",
"snippet": "ADD_TRAJECTORY_POINTS = 3"
},
{
"identifier": "EXECUTE_TRAJECTORY",
"path": "config.py",
"snippet": "EXECUTE_TRAJECTORY = 4"
},
{
"identifier": "OPEN_GRIPPER",
"path": "config.py",
"snippet": "OPEN_GRIPPER = 5"
},
{
"identifier": "CLOSE_GRIPPER",
"path": "config.py",
"snippet": "CLOSE_GRIPPER = 6"
},
{
"identifier": "TASK_COMPLETED",
"path": "config.py",
"snippet": "TASK_COMPLETED = 7"
},
{
"identifier": "RESET_ENVIRONMENT",
"path": "config.py",
"snippet": "RESET_ENVIRONMENT = 8"
}
] | import pybullet as p
import numpy as np
import pybullet_data
import time
import config
from robot import Robot
from config import OK, PROGRESS, FAIL, ENDC
from config import CAPTURE_IMAGES, ADD_BOUNDING_CUBES, ADD_TRAJECTORY_POINTS, EXECUTE_TRAJECTORY, OPEN_GRIPPER, CLOSE_GRIPPER, TASK_COMPLETED, RESET_ENVIRONMENT | 4,110 |
class Environment:
def __init__(self, args):
self.mode = args.mode
def load(self):
p.resetDebugVisualizerCamera(config.camera_distance, config.camera_yaw, config.camera_pitch, config.camera_target_position)
object_start_position = config.object_start_position
object_start_orientation_q = p.getQuaternionFromEuler(config.object_start_orientation_e)
object_model = p.loadURDF("ycb_assets/002_master_chef_can.urdf", object_start_position, object_start_orientation_q, useFixedBase=False, globalScaling=config.global_scaling)
if self.mode == "default":
p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
p.configureDebugVisualizer(p.COV_ENABLE_SHADOWS, 0)
def update(self):
p.stepSimulation()
time.sleep(config.control_dt)
def run_simulation_environment(args, env_connection, logger):
# Environment set-up
logger.info(PROGRESS + "Setting up environment..." + ENDC)
physics_client = p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.setGravity(0, 0, -9.81)
plane = p.loadURDF("plane.urdf")
env = Environment(args)
env.load()
robot = Robot(args)
robot.move(env, robot.ee_start_position, robot.ee_start_orientation_e, gripper_open=True, is_trajectory=False)
env_connection_message = OK + "Finished setting up environment!" + ENDC
env_connection.send([env_connection_message])
while True:
if env_connection.poll():
env_connection_received = env_connection.recv()
if env_connection_received[0] == CAPTURE_IMAGES:
_, _ = robot.get_camera_image("head", env, save_camera_image=True, rgb_image_path=config.rgb_image_trajectory_path.format(step=0), depth_image_path=config.depth_image_trajectory_path.format(step=0))
head_camera_position, head_camera_orientation_q = robot.get_camera_image("head", env, save_camera_image=True, rgb_image_path=config.rgb_image_head_path, depth_image_path=config.depth_image_head_path)
wrist_camera_position, wrist_camera_orientation_q = robot.get_camera_image("wrist", env, save_camera_image=True, rgb_image_path=config.rgb_image_wrist_path, depth_image_path=config.depth_image_wrist_path)
env_connection_message = OK + "Finished capturing head camera image!" + ENDC
env_connection.send([head_camera_position, head_camera_orientation_q, wrist_camera_position, wrist_camera_orientation_q, env_connection_message])
elif env_connection_received[0] == ADD_BOUNDING_CUBES:
bounding_cubes_world_coordinates = env_connection_received[1]
for bounding_cube_world_coordinates in bounding_cubes_world_coordinates:
p.addUserDebugLine(bounding_cube_world_coordinates[0], bounding_cube_world_coordinates[1], [0, 1, 0], lifeTime=0)
p.addUserDebugLine(bounding_cube_world_coordinates[1], bounding_cube_world_coordinates[2], [0, 1, 0], lifeTime=0)
p.addUserDebugLine(bounding_cube_world_coordinates[2], bounding_cube_world_coordinates[3], [0, 1, 0], lifeTime=0)
p.addUserDebugLine(bounding_cube_world_coordinates[3], bounding_cube_world_coordinates[0], [0, 1, 0], lifeTime=0)
p.addUserDebugLine(bounding_cube_world_coordinates[5], bounding_cube_world_coordinates[6], [0, 1, 0], lifeTime=0)
p.addUserDebugLine(bounding_cube_world_coordinates[6], bounding_cube_world_coordinates[7], [0, 1, 0], lifeTime=0)
p.addUserDebugLine(bounding_cube_world_coordinates[7], bounding_cube_world_coordinates[8], [0, 1, 0], lifeTime=0)
p.addUserDebugLine(bounding_cube_world_coordinates[8], bounding_cube_world_coordinates[5], [0, 1, 0], lifeTime=0)
p.addUserDebugLine(bounding_cube_world_coordinates[0], bounding_cube_world_coordinates[5], [0, 1, 0], lifeTime=0)
p.addUserDebugLine(bounding_cube_world_coordinates[1], bounding_cube_world_coordinates[6], [0, 1, 0], lifeTime=0)
p.addUserDebugLine(bounding_cube_world_coordinates[2], bounding_cube_world_coordinates[7], [0, 1, 0], lifeTime=0)
p.addUserDebugLine(bounding_cube_world_coordinates[3], bounding_cube_world_coordinates[8], [0, 1, 0], lifeTime=0)
p.addUserDebugPoints(bounding_cube_world_coordinates, [[0, 1, 0]] * len(bounding_cube_world_coordinates), pointSize=5, lifeTime=0)
env_connection_message = OK + "Finished adding bounding cubes to the environment!" + ENDC
env_connection.send([env_connection_message])
elif env_connection_received[0] == ADD_TRAJECTORY_POINTS:
trajectory = env_connection_received[1]
trajectory_points = [point[:3] for point in trajectory]
p.addUserDebugPoints(trajectory_points, [[0, 1, 1]] * len(trajectory_points), pointSize=5, lifeTime=0)
logger.info(OK + "Finished adding trajectory points to the environment!" + ENDC)
elif env_connection_received[0] == EXECUTE_TRAJECTORY:
trajectory = env_connection_received[1]
for point in trajectory:
robot.move(env, point[:3], np.array(robot.ee_start_orientation_e) + np.array([0, 0, point[3]]), gripper_open=robot.gripper_open, is_trajectory=True)
for _ in range(100):
env.update()
logger.info(OK + "Finished executing generated trajectory!" + ENDC)
|
class Environment:
def __init__(self, args):
self.mode = args.mode
def load(self):
p.resetDebugVisualizerCamera(config.camera_distance, config.camera_yaw, config.camera_pitch, config.camera_target_position)
object_start_position = config.object_start_position
object_start_orientation_q = p.getQuaternionFromEuler(config.object_start_orientation_e)
object_model = p.loadURDF("ycb_assets/002_master_chef_can.urdf", object_start_position, object_start_orientation_q, useFixedBase=False, globalScaling=config.global_scaling)
if self.mode == "default":
p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
p.configureDebugVisualizer(p.COV_ENABLE_SHADOWS, 0)
def update(self):
p.stepSimulation()
time.sleep(config.control_dt)
def run_simulation_environment(args, env_connection, logger):
# Environment set-up
logger.info(PROGRESS + "Setting up environment..." + ENDC)
physics_client = p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.setGravity(0, 0, -9.81)
plane = p.loadURDF("plane.urdf")
env = Environment(args)
env.load()
robot = Robot(args)
robot.move(env, robot.ee_start_position, robot.ee_start_orientation_e, gripper_open=True, is_trajectory=False)
env_connection_message = OK + "Finished setting up environment!" + ENDC
env_connection.send([env_connection_message])
while True:
if env_connection.poll():
env_connection_received = env_connection.recv()
if env_connection_received[0] == CAPTURE_IMAGES:
_, _ = robot.get_camera_image("head", env, save_camera_image=True, rgb_image_path=config.rgb_image_trajectory_path.format(step=0), depth_image_path=config.depth_image_trajectory_path.format(step=0))
head_camera_position, head_camera_orientation_q = robot.get_camera_image("head", env, save_camera_image=True, rgb_image_path=config.rgb_image_head_path, depth_image_path=config.depth_image_head_path)
wrist_camera_position, wrist_camera_orientation_q = robot.get_camera_image("wrist", env, save_camera_image=True, rgb_image_path=config.rgb_image_wrist_path, depth_image_path=config.depth_image_wrist_path)
env_connection_message = OK + "Finished capturing head camera image!" + ENDC
env_connection.send([head_camera_position, head_camera_orientation_q, wrist_camera_position, wrist_camera_orientation_q, env_connection_message])
elif env_connection_received[0] == ADD_BOUNDING_CUBES:
bounding_cubes_world_coordinates = env_connection_received[1]
for bounding_cube_world_coordinates in bounding_cubes_world_coordinates:
p.addUserDebugLine(bounding_cube_world_coordinates[0], bounding_cube_world_coordinates[1], [0, 1, 0], lifeTime=0)
p.addUserDebugLine(bounding_cube_world_coordinates[1], bounding_cube_world_coordinates[2], [0, 1, 0], lifeTime=0)
p.addUserDebugLine(bounding_cube_world_coordinates[2], bounding_cube_world_coordinates[3], [0, 1, 0], lifeTime=0)
p.addUserDebugLine(bounding_cube_world_coordinates[3], bounding_cube_world_coordinates[0], [0, 1, 0], lifeTime=0)
p.addUserDebugLine(bounding_cube_world_coordinates[5], bounding_cube_world_coordinates[6], [0, 1, 0], lifeTime=0)
p.addUserDebugLine(bounding_cube_world_coordinates[6], bounding_cube_world_coordinates[7], [0, 1, 0], lifeTime=0)
p.addUserDebugLine(bounding_cube_world_coordinates[7], bounding_cube_world_coordinates[8], [0, 1, 0], lifeTime=0)
p.addUserDebugLine(bounding_cube_world_coordinates[8], bounding_cube_world_coordinates[5], [0, 1, 0], lifeTime=0)
p.addUserDebugLine(bounding_cube_world_coordinates[0], bounding_cube_world_coordinates[5], [0, 1, 0], lifeTime=0)
p.addUserDebugLine(bounding_cube_world_coordinates[1], bounding_cube_world_coordinates[6], [0, 1, 0], lifeTime=0)
p.addUserDebugLine(bounding_cube_world_coordinates[2], bounding_cube_world_coordinates[7], [0, 1, 0], lifeTime=0)
p.addUserDebugLine(bounding_cube_world_coordinates[3], bounding_cube_world_coordinates[8], [0, 1, 0], lifeTime=0)
p.addUserDebugPoints(bounding_cube_world_coordinates, [[0, 1, 0]] * len(bounding_cube_world_coordinates), pointSize=5, lifeTime=0)
env_connection_message = OK + "Finished adding bounding cubes to the environment!" + ENDC
env_connection.send([env_connection_message])
elif env_connection_received[0] == ADD_TRAJECTORY_POINTS:
trajectory = env_connection_received[1]
trajectory_points = [point[:3] for point in trajectory]
p.addUserDebugPoints(trajectory_points, [[0, 1, 1]] * len(trajectory_points), pointSize=5, lifeTime=0)
logger.info(OK + "Finished adding trajectory points to the environment!" + ENDC)
elif env_connection_received[0] == EXECUTE_TRAJECTORY:
trajectory = env_connection_received[1]
for point in trajectory:
robot.move(env, point[:3], np.array(robot.ee_start_orientation_e) + np.array([0, 0, point[3]]), gripper_open=robot.gripper_open, is_trajectory=True)
for _ in range(100):
env.update()
logger.info(OK + "Finished executing generated trajectory!" + ENDC)
| elif env_connection_received[0] == OPEN_GRIPPER: | 9 | 2023-10-18 16:38:09+00:00 | 8k |
kvablack/susie | scripts/train.py | [
{
"identifier": "sampling",
"path": "susie/sampling.py",
"snippet": "def q_sample(x_0, log_snr, noise):\ndef model_predict(state, x, y, prompt_embeds, t, use_ema=True):\ndef sample_step(\n rng,\n state,\n x,\n y,\n prompt_embeds,\n uncond_y,\n uncond_prompt_embeds,\n t,\n t_next,\n log_snr_fn,\n context_w,\n prompt_w,\n eta,\n):\ndef sample_loop(\n rng,\n state,\n y,\n prompt_embeds,\n uncond_y,\n uncond_prompt_embeds,\n *,\n log_snr_fn,\n num_timesteps,\n context_w=1.0,\n prompt_w=1.0,\n eta=0.0,\n):\n def scan_fn(carry, t_combined):"
},
{
"identifier": "scheduling",
"path": "susie/scheduling.py",
"snippet": "def lnpoch(a, b):\ndef linear_log_snr(t, *, beta_start=0.001, beta_end=0.02, num_timesteps=1000):\ndef scaled_linear_log_snr(t, *, beta_start=0.00085, beta_end=0.012, num_timesteps=1000):\ndef cosine_log_snr(t, s: float = 0.008, d: float = 0.008):\ndef create_log_snr_fn(config):\ndef create_ema_decay_fn(config):\n def ema_decay_schedule(step):"
},
{
"identifier": "get_data_loader",
"path": "susie/data/datasets.py",
"snippet": "def get_data_loader(data_config, tokenize_fn, mesh=None):\n data_config = dict(data_config)\n batch_size = data_config.pop(\"batch_size\")\n\n train_datasets = []\n val_datasets = []\n weights = []\n for data_name, data_kwargs in data_config.items():\n data_kwargs = dict(data_kwargs)\n weights.append(float(data_kwargs.pop(\"weight\")))\n train_datasets.append(make_dataset(data_name, train=True, **data_kwargs))\n val_datasets.append(make_dataset(data_name, train=False, **data_kwargs))\n\n train = dl.DLataset.sample_from_datasets(\n train_datasets, weights=weights, stop_on_empty_dataset=True\n ).batch(batch_size, num_parallel_calls=tf.data.AUTOTUNE)\n val = dl.DLataset.sample_from_datasets(\n val_datasets, weights=weights, stop_on_empty_dataset=True\n ).batch(batch_size, num_parallel_calls=tf.data.AUTOTUNE)\n\n def shard(batch):\n return multihost_utils.host_local_array_to_global_array(\n batch,\n mesh,\n P((\"dp\", \"fsdp\")),\n )\n\n # WARNING: for some reason any amount of prefetching is also a total no-go in terms of memory usage...\n train = map(tokenize_fn, train.as_numpy_iterator())\n val = map(tokenize_fn, val.as_numpy_iterator())\n\n if mesh:\n return map(shard, train), map(shard, val), len(train_datasets)\n else:\n return train, val, len(train_datasets)"
},
{
"identifier": "host_broadcast_str",
"path": "susie/jax_utils.py",
"snippet": "def host_broadcast_str(x: str) -> str:\n \"\"\"Broadcast_one_to_all, but with a string. Strings should all be the same length.\"\"\"\n multihost_utils.assert_equal(\n len(x), f\"String lengths are not equal: got {len(x)} for {jax.process_index()}\"\n )\n encoded = np.array([ord(c) for c in x], dtype=np.uint8)\n encoded = multihost_utils.broadcast_one_to_all(encoded)\n return \"\".join([chr(u) for u in encoded])"
},
{
"identifier": "initialize_compilation_cache",
"path": "susie/jax_utils.py",
"snippet": "def initialize_compilation_cache(path=os.path.expanduser(\"~/.jax_compilation_cache\")):\n \"\"\"Initializes the Jax persistent compilation cache.\"\"\"\n compilation_cache.initialize_cache(path)\n for logger in [logging.getLogger(name) for name in logging.root.manager.loggerDict]:\n logger.addFilter(\n lambda record: \"Not writing persistent cache entry for\"\n not in record.getMessage()\n and \"Persistent compilation cache hit for\" not in record.getMessage()\n and \"to persistent compilation cache with key\" not in record.getMessage()\n )"
},
{
"identifier": "EmaTrainState",
"path": "susie/model.py",
"snippet": "class EmaTrainState(TrainState):\n params_ema: FrozenDict[str, Any]\n\n @partial(jax.jit, donate_argnums=0)\n def apply_ema_decay(self, ema_decay):\n params_ema = jax.tree_map(\n lambda p_ema, p: p_ema * ema_decay + p * (1.0 - ema_decay),\n self.params_ema,\n self.params,\n )\n return self.replace(params_ema=params_ema)"
},
{
"identifier": "create_model_def",
"path": "susie/model.py",
"snippet": "def create_model_def(config: dict) -> FlaxUNet2DConditionModel:\n model, unused_kwargs = FlaxUNet2DConditionModel.from_config(\n dict(config), return_unused_kwargs=True\n )\n if unused_kwargs:\n logging.warning(f\"FlaxUNet2DConditionModel unused kwargs: {unused_kwargs}\")\n # monkey-patch __call__ to use channels-last\n model.__call__ = lambda self, sample, *args, **kwargs: eo.rearrange(\n FlaxUNet2DConditionModel.__call__(\n self, eo.rearrange(sample, \"b h w c -> b c h w\"), *args, **kwargs\n ).sample,\n \"b c h w -> b h w c\",\n )\n return model"
},
{
"identifier": "load_pretrained_unet",
"path": "susie/model.py",
"snippet": "def load_pretrained_unet(\n path: str, in_channels: int\n) -> Tuple[FlaxUNet2DConditionModel, dict]:\n model_def, params = FlaxUNet2DConditionModel.from_pretrained(\n path, dtype=np.float32, subfolder=\"unet\"\n )\n\n # same issue, they commit the params to the CPU, which totally messes stuff\n # up downstream...\n params = jax.device_get(params)\n\n # add extra parameters to conv_in if necessary\n old_conv_in = params[\"conv_in\"][\"kernel\"]\n h, w, cin, cout = old_conv_in.shape\n logging.info(f\"Adding {in_channels - cin} channels to conv_in\")\n params[\"conv_in\"][\"kernel\"] = np.zeros(\n (h, w, in_channels, cout), dtype=old_conv_in.dtype\n )\n params[\"conv_in\"][\"kernel\"][:, :, :cin, :] = old_conv_in\n\n # monkey-patch __call__ to use channels-last\n model_def.__call__ = lambda self, sample, *args, **kwargs: eo.rearrange(\n FlaxUNet2DConditionModel.__call__(\n self, eo.rearrange(sample, \"b h w c -> b c h w\"), *args, **kwargs\n ).sample,\n \"b c h w -> b h w c\",\n )\n\n return model_def, params"
},
{
"identifier": "load_text_encoder",
"path": "susie/model.py",
"snippet": "def load_text_encoder(\n path: str,\n) -> Tuple[\n Callable[[List[str]], np.ndarray],\n Callable[[np.ndarray], List[str]],\n Callable[[jax.Array], jax.Array],\n]:\n if \":\" in path:\n path, revision = path.split(\":\")\n else:\n revision = None\n text_encoder = FlaxCLIPTextModel.from_pretrained(\n path, subfolder=\"text_encoder\", revision=revision\n )\n tokenizer = CLIPTokenizer.from_pretrained(\n path, subfolder=\"tokenizer\", revision=revision\n )\n\n def tokenize(s: List[str]) -> np.ndarray:\n return tokenizer(s, padding=\"max_length\", return_tensors=\"np\").input_ids\n\n untokenize = partial(tokenizer.batch_decode, skip_special_tokens=True)\n\n @jax.jit\n def text_encode(params, prompt_ids):\n return text_encoder(prompt_ids, params=params)[0]\n\n return tokenize, untokenize, partial(text_encode, text_encoder.params)"
},
{
"identifier": "load_vae",
"path": "susie/model.py",
"snippet": "def load_vae(\n path: str,\n) -> Tuple[\n Callable[[jax.Array, jax.Array, bool], jax.Array],\n Callable[[jax.Array, bool], jax.Array],\n]:\n if \":\" in path:\n path, revision = path.split(\":\")\n else:\n revision = None\n vae, vae_params = FlaxAutoencoderKL.from_pretrained(\n path, subfolder=\"vae\", revision=revision\n )\n # monkey-patch encode to use channels-last (it returns a FlaxDiagonalGaussianDistribution object, which is already\n # channels-last)\n vae.encode = lambda self, sample, *args, **kwargs: FlaxAutoencoderKL.encode(\n self, eo.rearrange(sample, \"b h w c -> b c h w\"), *args, **kwargs\n ).latent_dist\n\n # monkey-patch decode to use channels-last (it already accepts channels-last input)\n vae.decode = lambda self, latents, *args, **kwargs: eo.rearrange(\n FlaxAutoencoderKL.decode(self, latents, *args, **kwargs).sample,\n \"b c h w -> b h w c\",\n )\n\n # HuggingFace places vae_params committed onto the CPU -_-\n # this one took me awhile to figure out...\n vae_params = jax.device_get(vae_params)\n\n @jax.jit\n def vae_encode(vae_params, key, sample, scale=False):\n # handle the case where `sample` is multiple images stacked\n batch_size = sample.shape[0]\n sample = eo.rearrange(sample, \"n h w (x c) -> (n x) h w c\", c=3)\n latents = vae.apply({\"params\": vae_params}, sample, method=vae.encode).sample(\n key\n )\n latents = eo.rearrange(latents, \"(n x) h w c -> n h w (x c)\", n=batch_size)\n latents = jax.lax.cond(\n scale, lambda: latents * vae.config.scaling_factor, lambda: latents\n )\n return latents\n\n @jax.jit\n def vae_decode(vae_params, latents, scale=True):\n # handle the case where `latents` is multiple images stacked\n batch_size = latents.shape[0]\n latents = eo.rearrange(\n latents, \"n h w (x c) -> (n x) h w c\", c=vae.config.latent_channels\n )\n latents = jax.lax.cond(\n scale, lambda: latents / vae.config.scaling_factor, lambda: latents\n )\n sample = vae.apply({\"params\": vae_params}, latents, method=vae.decode)\n sample = eo.rearrange(sample, \"(n x) h w c -> n h w (x c)\", n=batch_size)\n return sample\n\n return partial(vae_encode, vae_params), partial(vae_decode, vae_params)"
}
] | import datetime
import functools
import logging
import os
import tempfile
import time
import einops as eo
import jax
import jax.numpy as jnp
import numpy as np
import optax
import orbax.checkpoint
import tensorflow as tf
import tqdm
import wandb
from collections import defaultdict
from functools import partial
from absl import app, flags
from flax.training import orbax_utils
from jax.experimental import multihost_utils
from jax.lax import with_sharding_constraint as wsc
from jax.sharding import NamedSharding
from jax.sharding import PartitionSpec as P
from ml_collections import ConfigDict, config_flags
from PIL import Image
from susie import sampling, scheduling
from susie.data.datasets import get_data_loader
from susie.jax_utils import (
host_broadcast_str,
initialize_compilation_cache,
)
from susie.model import (
EmaTrainState,
create_model_def,
load_pretrained_unet,
load_text_encoder,
load_vae,
)
from jax_smi import initialise_tracking # type: ignore | 4,621 |
# seems like remat is actually enabled by default -- this disables it
# @partial(jax.checkpoint, policy=jax.checkpoint_policies.everything_saveable)
def loss_fn(params, rng):
pred = state.apply_fn(
{"params": params},
input,
t * 1000,
prompt_embeds,
train=not eval_only,
rngs={"dropout": rng},
)
assert pred.shape == noise.shape
loss = (pred - noise) ** 2
return jnp.mean(loss)
info = {}
if not eval_only:
grad_fn = jax.value_and_grad(loss_fn)
rng, dropout_rng = jax.random.split(rng)
info["loss"], grads = grad_fn(state.params, dropout_rng)
info["grad_norm"] = optax.global_norm(grads)
new_state = state.apply_gradients(grads=grads)
else:
rng, dropout_rng = jax.random.split(rng)
info["loss"] = loss_fn(state.params, dropout_rng)
rng, dropout_rng = jax.random.split(rng)
info["loss_ema"] = loss_fn(state.params_ema, dropout_rng)
new_state = state
return new_state, info
FLAGS = flags.FLAGS
config_flags.DEFINE_config_file(
"config",
None,
"File path to the hyperparameter configuration.",
lock_config=False,
)
def main(_):
config = FLAGS.config
assert config.sample.num_contexts % 4 == 0
# prevent tensorflow from using GPUs
tf.config.experimental.set_visible_devices([], "GPU")
tf.random.set_seed(config.seed + jax.process_index())
# get jax devices
logging.info(f"JAX process: {jax.process_index()} of {jax.process_count()}")
logging.info(f"Local devices: {jax.local_device_count()}")
logging.info(f"Global devices: {jax.device_count()}")
mesh = jax.sharding.Mesh(
# create_device_mesh([32, 1]), # can't make contiguous meshes for the v4-64 pod for some reason
np.array(jax.devices()).reshape(*config.mesh),
axis_names=["dp", "fsdp"],
)
replicated_sharding = NamedSharding(mesh, P())
# data gets sharded over both dp and fsdp logical axes
data_sharding = NamedSharding(mesh, P(["dp", "fsdp"]))
# initial rng
rng = jax.random.PRNGKey(config.seed + jax.process_index())
# set up wandb run
if config.wandb_resume_id is not None:
run = wandb.Api().run(config.wandb_resume_id)
old_num_steps = config.num_steps
config = ConfigDict(run.config)
config.num_steps = old_num_steps
config.wandb_resume_id = run.id
logdir = tf.io.gfile.join(config.logdir, run.name)
if jax.process_index() == 0:
wandb.init(
project=run.project,
id=run.id,
resume="must",
)
else:
unique_id = datetime.datetime.now().strftime("%Y.%m.%d_%H.%M.%S")
unique_id = host_broadcast_str(unique_id)
if not config.run_name:
config.run_name = unique_id
else:
config.run_name += "_" + unique_id
logdir = tf.io.gfile.join(config.logdir, config.run_name)
if jax.process_index() == 0:
tf.io.gfile.makedirs(logdir)
wandb.init(
project=config.wandb_project,
name=config.run_name,
config=config.to_dict(),
)
checkpointer = orbax.checkpoint.CheckpointManager(
logdir,
checkpointers={
"state": orbax.checkpoint.PyTreeCheckpointer(),
"params_ema": orbax.checkpoint.PyTreeCheckpointer(),
},
)
log_snr_fn = scheduling.create_log_snr_fn(config.scheduling)
ema_decay_fn = scheduling.create_ema_decay_fn(config.ema)
# load vae
if config.vae is not None:
vae_encode, vae_decode = load_vae(config.vae)
# load text encoder
|
if jax.process_count() > 1:
jax.distributed.initialize()
tqdm = partial(tqdm.tqdm, dynamic_ncols=True)
try:
initialise_tracking()
except ImportError:
pass
def fsdp_sharding(mesh: jax.sharding.Mesh, array: jax.ShapeDtypeStruct):
if array.ndim < 2:
# replicate scalar and vector arrays
return NamedSharding(mesh, P())
# shard matrices and larger tensors across the fsdp dimension. the conv kernels are a little tricky because they
# vary in which axis is a power of 2, so I'll just search for the first one that works.
l = []
for n in array.shape:
if n % mesh.shape["fsdp"] == 0:
l.append("fsdp")
return NamedSharding(mesh, P(*l))
l.append(None)
logging.warning(
f"Could not find a valid sharding for array of shape {array.shape} with mesh of shape {mesh.shape}"
)
return NamedSharding(mesh, P())
def train_step(
rng,
state,
batch,
# static args
log_snr_fn,
uncond_prompt_embed,
text_encode_fn,
vae_encode_fn,
curr_drop_rate=0.0,
goal_drop_rate=0.0,
prompt_drop_rate=0.0,
eval_only=False,
):
batch_size = batch["subgoals"].shape[0]
# encode stuff
for key in {"curr", "goals", "subgoals"}.intersection(batch.keys()):
# VERY IMPORTANT: for some godforsaken reason, the context latents are
# NOT scaled in InstructPix2Pix
scale = key == "subgoals"
rng, encode_rng = jax.random.split(rng)
batch[key] = vae_encode_fn(encode_rng, batch[key], scale=scale)
prompt_embeds = text_encode_fn(batch["prompt_ids"])
if goal_drop_rate == 1.0:
batch["goals"] = jnp.zeros(
batch["subgoals"].shape[:-1] + (0,), batch["subgoals"].dtype
)
elif goal_drop_rate > 0:
rng, mask_rng = jax.random.split(rng)
batch["goals"] = jnp.where(
jax.random.uniform(mask_rng, shape=(batch_size, 1, 1, 1)) < goal_drop_rate,
0,
batch["goals"],
)
if curr_drop_rate > 0:
rng, mask_rng = jax.random.split(rng)
batch["curr"] = jnp.where(
jax.random.uniform(mask_rng, shape=(batch_size, 1, 1, 1)) < curr_drop_rate,
0,
batch["curr"],
)
if prompt_drop_rate > 0:
rng, mask_rng = jax.random.split(rng)
prompt_embeds = jnp.where(
jax.random.uniform(mask_rng, shape=(batch_size, 1, 1)) < prompt_drop_rate,
uncond_prompt_embed,
prompt_embeds,
)
x = batch["subgoals"] # the generation target
y = jnp.concatenate(
[batch["curr"], batch["goals"]], axis=-1
) # the conditioning image(s)
# sample batch of timesteps from t ~ U[0, num_train_timesteps)
rng, t_rng = jax.random.split(rng)
t = jax.random.uniform(t_rng, shape=(batch_size,), dtype=jnp.float32)
# sample noise (epsilon) from N(0, I)
rng, noise_rng = jax.random.split(rng)
noise = jax.random.normal(noise_rng, x.shape)
log_snr = log_snr_fn(t)
# generate the noised image from q(x_t | x_0, y)
x_t = sampling.q_sample(x, log_snr, noise)
input = jnp.concatenate([x_t, y], axis=-1)
# seems like remat is actually enabled by default -- this disables it
# @partial(jax.checkpoint, policy=jax.checkpoint_policies.everything_saveable)
def loss_fn(params, rng):
pred = state.apply_fn(
{"params": params},
input,
t * 1000,
prompt_embeds,
train=not eval_only,
rngs={"dropout": rng},
)
assert pred.shape == noise.shape
loss = (pred - noise) ** 2
return jnp.mean(loss)
info = {}
if not eval_only:
grad_fn = jax.value_and_grad(loss_fn)
rng, dropout_rng = jax.random.split(rng)
info["loss"], grads = grad_fn(state.params, dropout_rng)
info["grad_norm"] = optax.global_norm(grads)
new_state = state.apply_gradients(grads=grads)
else:
rng, dropout_rng = jax.random.split(rng)
info["loss"] = loss_fn(state.params, dropout_rng)
rng, dropout_rng = jax.random.split(rng)
info["loss_ema"] = loss_fn(state.params_ema, dropout_rng)
new_state = state
return new_state, info
FLAGS = flags.FLAGS
config_flags.DEFINE_config_file(
"config",
None,
"File path to the hyperparameter configuration.",
lock_config=False,
)
def main(_):
config = FLAGS.config
assert config.sample.num_contexts % 4 == 0
# prevent tensorflow from using GPUs
tf.config.experimental.set_visible_devices([], "GPU")
tf.random.set_seed(config.seed + jax.process_index())
# get jax devices
logging.info(f"JAX process: {jax.process_index()} of {jax.process_count()}")
logging.info(f"Local devices: {jax.local_device_count()}")
logging.info(f"Global devices: {jax.device_count()}")
mesh = jax.sharding.Mesh(
# create_device_mesh([32, 1]), # can't make contiguous meshes for the v4-64 pod for some reason
np.array(jax.devices()).reshape(*config.mesh),
axis_names=["dp", "fsdp"],
)
replicated_sharding = NamedSharding(mesh, P())
# data gets sharded over both dp and fsdp logical axes
data_sharding = NamedSharding(mesh, P(["dp", "fsdp"]))
# initial rng
rng = jax.random.PRNGKey(config.seed + jax.process_index())
# set up wandb run
if config.wandb_resume_id is not None:
run = wandb.Api().run(config.wandb_resume_id)
old_num_steps = config.num_steps
config = ConfigDict(run.config)
config.num_steps = old_num_steps
config.wandb_resume_id = run.id
logdir = tf.io.gfile.join(config.logdir, run.name)
if jax.process_index() == 0:
wandb.init(
project=run.project,
id=run.id,
resume="must",
)
else:
unique_id = datetime.datetime.now().strftime("%Y.%m.%d_%H.%M.%S")
unique_id = host_broadcast_str(unique_id)
if not config.run_name:
config.run_name = unique_id
else:
config.run_name += "_" + unique_id
logdir = tf.io.gfile.join(config.logdir, config.run_name)
if jax.process_index() == 0:
tf.io.gfile.makedirs(logdir)
wandb.init(
project=config.wandb_project,
name=config.run_name,
config=config.to_dict(),
)
checkpointer = orbax.checkpoint.CheckpointManager(
logdir,
checkpointers={
"state": orbax.checkpoint.PyTreeCheckpointer(),
"params_ema": orbax.checkpoint.PyTreeCheckpointer(),
},
)
log_snr_fn = scheduling.create_log_snr_fn(config.scheduling)
ema_decay_fn = scheduling.create_ema_decay_fn(config.ema)
# load vae
if config.vae is not None:
vae_encode, vae_decode = load_vae(config.vae)
# load text encoder | tokenize, untokenize, text_encode = load_text_encoder(config.text_encoder) | 8 | 2023-10-17 05:05:57+00:00 | 8k |
mlvlab/Flipped-VQA | llama_vqa.py | [
{
"identifier": "ModelArgs",
"path": "llama/model.py",
"snippet": "class ModelArgs:\n dim: int = 512\n n_layers: int = 8\n n_heads: int = 8\n vocab_size: int = -1 # defined later by tokenizer\n multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2\n norm_eps: float = 1e-5\n\n max_batch_size: int = 32\n max_seq_len: int = 2048\n adapter_len: int=10\n adapter_layer: int=30"
},
{
"identifier": "Transformer",
"path": "llama/model.py",
"snippet": "class Transformer(nn.Module):\n def __init__(self, params: ModelArgs, args):\n super().__init__()\n params.max_feats = args.max_feats\n params.bias = args.bias\n self.args = args\n self.params = params\n self.vocab_size = params.vocab_size\n self.n_layers = params.n_layers\n self.max_feats = args.max_feats\n\n\n self.tok_embeddings = Embedding(params.vocab_size, params.dim)\n\n self.adapter_query = Embedding(params.adapter_len * params.adapter_layer, params.dim)\n self.visual_proj = Linear(768, params.dim, bias=False)\n self.temporal_emb = Embedding(self.max_feats, params.dim)\n self.adapter_len = params.adapter_len\n self.adapter_layer = params.adapter_layer\n\n self.vqa_criterion = torch.nn.CrossEntropyLoss(ignore_index=0)\n self.vaq_criterion = torch.nn.CrossEntropyLoss(ignore_index=0)\n self.qav_criterion = torch.nn.CrossEntropyLoss(ignore_index=-1)\n self.inference_criterion = torch.nn.CrossEntropyLoss(ignore_index=0, reduction='none')\n\n self.layers = torch.nn.ModuleList()\n for layer_id in range(params.n_layers):\n self.layers.append(TransformerBlock(layer_id, params))\n\n self.norm = RMSNorm(params.dim, eps=params.norm_eps)\n self.output = Linear(params.dim, params.vocab_size, bias=False)\n\n self.freqs_cis = precompute_freqs_cis(self.params.dim // self.params.n_heads, self.params.max_seq_len * 2)\n\n self.video_label = torch.arange(1, self.max_feats)\n self.tau = args.tau\n\n def forward(self, data, inference=False):\n video = data['video'].cuda()\n vqa_id, vaq_id, qav_id = data['text_id']['vqa'].cuda(), data['text_id']['vaq'].cuda(), data['text_id']['qav'].cuda()\n vqa_label, vaq_label, qav_label = data['label']['vqa'].cuda(), data['label']['vaq'].cuda(), data['label']['qav'].cuda()\n vqa_video_start, vaq_video_start, qav_video_index = data['video_start']['vqa'][0], data['video_start']['vaq'][0], data['video_index']['qav'].cuda()\n \n bsz, n_options, seqlen = vqa_id.shape\n vqa_id, vaq_id = vqa_id.reshape(-1, seqlen), vaq_id.reshape(-1, seqlen)\n vqa_label, vaq_label = vqa_label.reshape(-1, seqlen), vaq_label.reshape(-1, seqlen)\n vqa_label, vaq_label = vqa_label[:, 1:].flatten(), vaq_label[:, 1:].flatten()\n \n qav_id = qav_id.reshape(-1, seqlen)\n qav_label = qav_label.reshape(-1, seqlen)\n qav_video_mask = qav_label.ge(0)\n qav_label = qav_label[:, 1:].flatten()\n \n \n with torch.no_grad():\n vqa_h = self.tok_embeddings(vqa_id)\n \n if self.args.vaq and not inference:\n vaq_h = self.tok_embeddings(vaq_id)\n \n if self.args.qav and not inference:\n qav_h = self.tok_embeddings(qav_id)\n \n freqs_cis = self.freqs_cis.to(vqa_h.device)\n freqs_cis = freqs_cis[:seqlen]\n mask = None\n mask = torch.full((1, 1, seqlen, seqlen), float(\"-inf\"), device=vqa_h.device)\n mask = torch.triu(mask, diagonal=0 + 1).type_as(vqa_h)\n start_pos = 0\n vaq_loss, qav_loss = torch.tensor([0]).cuda(), torch.tensor([0]).cuda()\n \n adapter = self.adapter_query.weight.reshape(-1, self.adapter_len, self.params.dim).unsqueeze(1)\n _video_feature = self.visual_proj(video)\n if inference:\n _video_feature = _video_feature.unsqueeze(1).repeat(1, n_options, 1, 1).view(-1, _video_feature.shape[-2], _video_feature.shape[-1])\n video_feature = (_video_feature + self.temporal_emb.weight[None, :, :]).half()\n \n vqa_h = vqa_h.clone()\n vqa_h[:, vqa_video_start:vqa_video_start+self.max_feats] = video_feature\n\n \n if self.args.vaq and not inference:\n vaq_h = vaq_h.clone()\n vaq_h[:, vaq_video_start:vaq_video_start+self.max_feats] = video_feature\n \n if self.args.qav and not inference:\n qav_h = qav_h * ~qav_video_mask[..., None]\n qav_h.scatter_add_(1, qav_video_index[..., None].repeat(1, 1, self.params.dim), video_feature)\n \n for i, layer in enumerate(self.layers[-1 * self.adapter_layer:]):\n vqa_h = layer(vqa_h, start_pos, freqs_cis, mask, adapter[i].half(), vqa_video_start)\n \n if self.args.vaq and not inference:\n vaq_h = layer(vaq_h, start_pos, freqs_cis, mask, adapter[i].half(), vaq_video_start)\n \n if self.args.qav and not inference:\n qav_h = layer(qav_h, start_pos, freqs_cis, mask, adapter[i].half(), None)\n \n \n vqa_h = self.norm(vqa_h)\n vqa_output = self.output(vqa_h)\n vqa_output = vqa_output[:, :-1, :].reshape(-1, self.vocab_size)\n vqa_loss = self.vqa_criterion(vqa_output, vqa_label)\n \n if self.args.vaq and not inference:\n vaq_h = self.norm(vaq_h)\n vaq_output = self.output(vaq_h)\n vaq_output = vaq_output[:, :-1, :].reshape(-1, self.vocab_size)\n vaq_loss = self.vaq_criterion(vaq_output, vaq_label)\n \n if self.args.qav and not inference:\n qav_h = self.norm(qav_h)\n qav_output = torch.bmm(qav_h[:, :-1].float(), _video_feature.transpose(1, 2).float()).reshape(-1, self.max_feats)\n qav_loss = self.qav_criterion(qav_output / self.tau, qav_label)\n \n if inference:\n logits = self.inference_criterion(vqa_output, vqa_label)\n logits = logits.reshape(bsz, n_options, -1)\n return logits\n else:\n return vqa_loss, vaq_loss, qav_loss"
},
{
"identifier": "Tokenizer",
"path": "llama/tokenizer.py",
"snippet": "class Tokenizer:\n def __init__(self, model_path: str):\n # reload tokenizer\n assert os.path.isfile(model_path), model_path\n self.sp_model = SentencePieceProcessor(model_file=model_path)\n logger.info(f\"Reloaded SentencePiece model from {model_path}\")\n\n # BOS / EOS token IDs\n self.n_words: int = self.sp_model.vocab_size()\n self.bos_id: int = self.sp_model.bos_id()\n self.eos_id: int = self.sp_model.eos_id()\n self.pad_id: int = self.sp_model.pad_id()\n \n self.v_token_id = 15167\n self.q_token_id = 16492\n self.a_token_id = 22550\n self.nl_id = 13\n logger.info(f\"#words: {self.n_words} - BOS ID: {self.bos_id} - EOS ID: {self.eos_id}\")\n assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()\n\n def encode(self, s: str, bos: bool, eos: bool) -> List[int]:\n assert type(s) is str\n t = self.sp_model.encode(s)\n if bos:\n t = [self.bos_id] + t\n if eos:\n t = t + [self.eos_id]\n return t\n\n def encode_vqa(self, text=None, max_feats=10, split='train', answer_mapping=None, answer=None) -> List[int]:\n i_text = \"Instruction: Predict the answer based on the video and question.\\n\"\n q_text = text['q_text']\n o_text = text['o_text']\n a_text = text['a_text']\n \n s1 = i_text + 'Video:'\n t1 = [self.bos_id] + self.sp_model.encode(s1)\n video_start = len(t1)\n\n s2 = q_text + o_text + a_text\n\n if split == 'train':\n s2 = s2 + answer_mapping[answer] \n t2 = self.sp_model.encode(s2) + [self.eos_id]\n t = [t1 + [-2 for _ in range(max_feats)] + [self.nl_id] + t2]\n prefix_index = t[0].index(self.a_token_id) + 5\n else:\n t = []\n for k, v in answer_mapping.items():\n t2 = self.sp_model.encode(s2 + v) + [self.eos_id]\n t.append(t1 + [-2 for _ in range(max_feats)] + [self.nl_id] + t2)\n prefix_index = t[answer].index(self.a_token_id) + 5\n return t, prefix_index, video_start\n\n def encode_vaq(self, text=None, max_feats=10, split='train', answer_mapping=None, answer=None) -> List[int]:\n i_text = \"Instruction: Predict the question based on the video and answer.\\n\"\n q_text = text['q_text'].strip()\n o_text = text['o_text']\n a_text = text['a_text']\n \n s1 = i_text + 'Video:'\n t1 = [self.bos_id] + self.sp_model.encode(s1)\n video_start = len(t1)\n \n s2 = o_text + a_text\n \n if split == 'train':\n s2 = s2 + answer_mapping[answer] + \"\\n\" + q_text\n t2 = self.sp_model.encode(s2) + [self.eos_id]\n t = [t1 + [-2 for _ in range(max_feats)] + [self.nl_id] + t2]\n prefix_index = t[0].index(self.q_token_id) + 2\n else:\n t = []\n for k, v in answer_mapping.items():\n t2 = self.sp_model.encode(s2 + v + \"\\n\" + q_text) + [self.eos_id]\n t.append(t1 + [-2 for _ in range(max_feats)] + [self.nl_id] + t2)\n prefix_index = t[answer].index(self.q_token_id) + 2\n return t, prefix_index, video_start\n \n def encode_qav(self, text=None, max_feats=10, split='train', answer_mapping=None, answer=None) -> List[int]:\n i_text = \"Instruction: Predict the video based on the question and answer.\\n\"\n q_text = text['q_text']\n o_text = text['o_text']\n a_text = text['a_text']\n \n s1 = i_text + q_text + o_text + a_text\n \n if split == 'train':\n s1 = s1 + answer_mapping[answer] + \"\\n\" + \"Video:\"\n t1 = [self.bos_id] + self.sp_model.encode(s1)\n t = [t1 + [-2 for _ in range(max_feats)] + [self.eos_id]]\n prefix_index = t[0].index(self.v_token_id) + 2\n else:\n t = []\n for k, v in answer_mapping.items():\n t1 = [self.bos_id] + self.sp_model.encode(s1 + v + \"\\n\" + \"Video:\") + [-2 for _ in range(max_feats)] + [self.eos_id]\n t.append(t1)\n prefix_index = t[answer].index(self.v_token_id) + 2\n return t, prefix_index\n\n def decode(self, t: List[int]) -> str:\n return self.sp_model.decode(t)\n\n def encode_dvqa(self, text=None, max_feats=10, split='train', answer_mapping=None, answer=None) -> List[int]:\n i_text = \"Instruction: Predict the answer based on the dialogue, video and question.\\n\"\n q_text = text['q_text']\n o_text = text['o_text']\n a_text = text['a_text']\n d_text = text['d_text']\n \n s1 = i_text + 'Video:'\n t1 = [self.bos_id] + self.sp_model.encode(s1)\n video_start = len(t1)\n \n prefix_i = video_start + max_feats + 1\n d1 = self.sp_model.encode(d_text)\n prefix_main = prefix_i + len(d1)\n\n s2 = q_text + o_text + a_text\n\n if split == 'train':\n s2 = s2 + answer_mapping[answer] \n t2 = self.sp_model.encode(s2) + [self.eos_id]\n t = [t1 + [-2 for _ in range(max_feats)] + [self.nl_id] + d1 + t2]\n else:\n t = []\n for k, v in answer_mapping.items():\n t2 = self.sp_model.encode(s2 + v) + [self.eos_id]\n t.append(t1 + [-2 for _ in range(max_feats)] + [self.nl_id] + d1 + t2)\n\n prefix_index = len(t[0]) - 4\n \n return t, prefix_index, video_start, prefix_i, prefix_main\n\n def encode_dvaq(self, text=None, max_feats=10, split='train', answer_mapping=None, answer=None) -> List[int]:\n i_text = \"Instruction: Predict the question based on the dialogue, video and answer.\\n\"\n q_text = text['q_text'].strip()\n o_text = text['o_text']\n a_text = text['a_text']\n d_text = text['d_text']\n \n s1 = i_text + 'Video:'\n t1 = [self.bos_id] + self.sp_model.encode(s1)\n video_start = len(t1)\n \n prefix_i = video_start + max_feats + 1\n d1 = self.sp_model.encode(d_text)\n prefix_main = prefix_i + len(d1)\n\n s2 = o_text + a_text\n \n if split == 'train':\n s2 = s2 + answer_mapping[answer] + \"\\n\" + q_text\n t2 = self.sp_model.encode(s2) + [self.eos_id]\n t = [t1 + [-2 for _ in range(max_feats)] + [self.nl_id] + d1 + t2]\n else:\n t = []\n for k, v in answer_mapping.items():\n t2 = self.sp_model.encode(s2 + v + \"\\n\" + q_text) + [self.eos_id]\n t.append(t1 + [-2 for _ in range(max_feats)] + [self.nl_id] + d1 + t2)\n \n prefix_index = t[0].index(self.q_token_id) + 2\n \n return t, prefix_index, video_start, prefix_i, prefix_main\n \n def encode_dqav(self, text=None, max_feats=10, max_seq_len=128, split='train', answer_mapping=None, answer=None) -> List[int]:\n i_text = \"Instruction: Predict the video based on the dialogue, question and answer.\\n\"\n d_text = text['d_text']\n q_text = text['q_text']\n o_text = text['o_text']\n a_text = text['a_text']\n s1, s2, s3 = i_text, d_text, q_text + o_text + a_text\n\n t1 = [self.bos_id] + self.sp_model.encode(s1)\n t2 = self.sp_model.encode(s2)\n prefix_i, prefix_q = len(t1), len(t1) + len(t2)\n\n if split == 'train':\n t3 = self.sp_model.encode(s3 + answer_mapping[answer] + \"\\n\" + \"Video:\")\n t = [t1 + t2 + t3 + [-2 for _ in range(max_feats)] + [self.eos_id]]\n else:\n t = []\n for k, v in answer_mapping.items():\n t3 = self.sp_model.encode(s3 + v + \"\\n\" + \"Video:\") + [-2 for _ in range(max_feats)] + [self.eos_id]\n t.append(t1 + t2 + t3)\n \n prefix_index = len(t[0]) - max_feats - 1\n \n return t, prefix_index, prefix_i, prefix_q"
}
] | import torch
import json
from llama import ModelArgs, Tokenizer, Transformer
from pathlib import Path | 4,682 |
def LLaMA_VQA(args, **kwargs):
with open(f'{args.llama_model_path}{args.model}/params.json', "r") as f:
params = json.loads(f.read())
tokenizer = Tokenizer(model_path=f'{args.llama_model_path}/tokenizer.model')
print(f"Using model: {args.model}")
checkpoints = (Path(args.llama_model_path) / args.model).glob("*.pth")
checkpoints = sorted(checkpoints)
loaded = []
for x in checkpoints:
print("loading from", x)
loaded.append(torch.load(x, map_location="cpu"))
if len(loaded) == 1:
full_state_dict = loaded[0]
else:
full_state_dict = {}
split_dims = {}
def add_weight_with_split_dim(name, dim):
if dim < 0: # bcast without split
full_state_dict[name] = loaded[0][name].clone()
else:
full_state_dict[name] = torch.cat([x[name] for x in loaded], dim=dim)
for x in loaded:
del x[name]
split_dims[name] = dim
add_weight_with_split_dim("tok_embeddings.weight", 1)
add_weight_with_split_dim("norm.weight", -1)
add_weight_with_split_dim("output.weight", 0)
for i in range(params["n_layers"]):
print("gathering layer %d of %d" % (i, params["n_layers"]))
layer_prefix = f"layers.{i}."
bcast_names = ["attention_norm.weight", "ffn_norm.weight"]
column_parallel_names = ["attention.wq.weight", "attention.wk.weight", "attention.wv.weight", "feed_forward.w1.weight", "feed_forward.w3.weight"]
row_parallel_names = ["attention.wo.weight", "feed_forward.w2.weight"]
for key in bcast_names:
add_weight_with_split_dim(layer_prefix + key, -1)
for key in column_parallel_names:
add_weight_with_split_dim(layer_prefix + key, 0)
for key in row_parallel_names:
add_weight_with_split_dim(layer_prefix + key, 1)
|
def LLaMA_VQA(args, **kwargs):
with open(f'{args.llama_model_path}{args.model}/params.json', "r") as f:
params = json.loads(f.read())
tokenizer = Tokenizer(model_path=f'{args.llama_model_path}/tokenizer.model')
print(f"Using model: {args.model}")
checkpoints = (Path(args.llama_model_path) / args.model).glob("*.pth")
checkpoints = sorted(checkpoints)
loaded = []
for x in checkpoints:
print("loading from", x)
loaded.append(torch.load(x, map_location="cpu"))
if len(loaded) == 1:
full_state_dict = loaded[0]
else:
full_state_dict = {}
split_dims = {}
def add_weight_with_split_dim(name, dim):
if dim < 0: # bcast without split
full_state_dict[name] = loaded[0][name].clone()
else:
full_state_dict[name] = torch.cat([x[name] for x in loaded], dim=dim)
for x in loaded:
del x[name]
split_dims[name] = dim
add_weight_with_split_dim("tok_embeddings.weight", 1)
add_weight_with_split_dim("norm.weight", -1)
add_weight_with_split_dim("output.weight", 0)
for i in range(params["n_layers"]):
print("gathering layer %d of %d" % (i, params["n_layers"]))
layer_prefix = f"layers.{i}."
bcast_names = ["attention_norm.weight", "ffn_norm.weight"]
column_parallel_names = ["attention.wq.weight", "attention.wk.weight", "attention.wv.weight", "feed_forward.w1.weight", "feed_forward.w3.weight"]
row_parallel_names = ["attention.wo.weight", "feed_forward.w2.weight"]
for key in bcast_names:
add_weight_with_split_dim(layer_prefix + key, -1)
for key in column_parallel_names:
add_weight_with_split_dim(layer_prefix + key, 0)
for key in row_parallel_names:
add_weight_with_split_dim(layer_prefix + key, 1)
| model_args: ModelArgs = ModelArgs(max_seq_len=args.max_seq_len, max_batch_size=32, adapter_len=args.adapter_len, adapter_layer=args.adapter_layer, **params) | 0 | 2023-10-19 02:06:04+00:00 | 8k |
openvpi/SingingVocoders | models/nsf_univnet/nsfunivnet.py | [
{
"identifier": "LVCBlock",
"path": "modules/univ_ddsp/block.py",
"snippet": "class LVCBlock(torch.nn.Module):\n ''' the location-variable convolutions\n '''\n\n def __init__(self,\n in_channels,\n cond_channels,\n upsample_ratio,\n conv_layers=4,\n conv_kernel_size=3,\n cond_hop_length=256,\n kpnet_hidden_channels=64,\n kpnet_conv_size=3,\n kpnet_dropout=0.0\n ):\n super().__init__()\n self.nsfppj=torch.nn.Conv1d(in_channels*2,in_channels,kernel_size=1)\n\n self.cond_hop_length = cond_hop_length\n self.conv_layers = conv_layers\n self.conv_kernel_size = conv_kernel_size\n self.convs = torch.nn.ModuleList()\n\n self.upsample = torch.nn.ConvTranspose1d(in_channels, in_channels,\n kernel_size=upsample_ratio*2, stride=upsample_ratio,\n padding=upsample_ratio // 2 + upsample_ratio % 2,\n output_padding=upsample_ratio % 2)\n\n\n self.kernel_predictor = KernelPredictor(\n cond_channels=cond_channels,\n conv_in_channels=in_channels,\n conv_out_channels=2 * in_channels,\n conv_layers=conv_layers,\n conv_kernel_size=conv_kernel_size,\n kpnet_hidden_channels=kpnet_hidden_channels,\n kpnet_conv_size=kpnet_conv_size,\n kpnet_dropout=kpnet_dropout\n )\n\n\n for i in range(conv_layers):\n padding = (3 ** i) * int((conv_kernel_size - 1) / 2)\n conv = torch.nn.Conv1d(in_channels, in_channels, kernel_size=conv_kernel_size, padding=padding, dilation=3 ** i)\n\n self.convs.append(conv)\n\n\n def forward(self, x, c,spec):\n ''' forward propagation of the location-variable convolutions.\n Args:\n x (Tensor): the input sequence (batch, in_channels, in_length)\n c (Tensor): the conditioning sequence (batch, cond_channels, cond_length)\n\n Returns:\n Tensor: the output sequence (batch, in_channels, in_length)\n '''\n batch, in_channels, in_length = x.shape\n\n\n kernels, bias = self.kernel_predictor(c)\n\n x = F.leaky_relu(x, 0.2)\n x = self.upsample(x)\n x=self.nsfppj(torch.cat([x,spec],dim=1))\n for i in range(self.conv_layers):\n y = F.leaky_relu(x, 0.2)\n y = self.convs[i](y)\n y = F.leaky_relu(y, 0.2)\n\n k = kernels[:, i, :, :, :, :]\n b = bias[:, i, :, :]\n y = self.location_variable_convolution(y, k, b, 1, self.cond_hop_length)\n x = x + torch.sigmoid(y[:, :in_channels, :]) * torch.tanh(y[:, in_channels:, :])\n return x\n\n def location_variable_convolution(self, x, kernel, bias, dilation, hop_size):\n ''' perform location-variable convolution operation on the input sequence (x) using the local convolution kernl.\n Time: 414 μs ± 309 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each), test on NVIDIA V100.\n Args:\n x (Tensor): the input sequence (batch, in_channels, in_length).\n kernel (Tensor): the local convolution kernel (batch, in_channel, out_channels, kernel_size, kernel_length)\n bias (Tensor): the bias for the local convolution (batch, out_channels, kernel_length)\n dilation (int): the dilation of convolution.\n hop_size (int): the hop_size of the conditioning sequence.\n Returns:\n (Tensor): the output sequence after performing local convolution. (batch, out_channels, in_length).\n '''\n batch, in_channels, in_length = x.shape\n batch, in_channels, out_channels, kernel_size, kernel_length = kernel.shape\n\n\n assert in_length == (kernel_length * hop_size), \"length of (x, kernel) is not matched\"\n\n padding = dilation * int((kernel_size - 1) / 2)\n x = F.pad(x, (padding, padding), 'constant', 0) # (batch, in_channels, in_length + 2*padding)\n x = x.unfold(2, hop_size + 2 * padding, hop_size) # (batch, in_channels, kernel_length, hop_size + 2*padding)\n\n if hop_size < dilation:\n x = F.pad(x, (0, dilation), 'constant', 0)\n x = x.unfold(3, dilation,\n dilation) # (batch, in_channels, kernel_length, (hop_size + 2*padding)/dilation, dilation)\n x = x[:, :, :, :, :hop_size]\n x = x.transpose(3, 4) # (batch, in_channels, kernel_length, dilation, (hop_size + 2*padding)/dilation)\n x = x.unfold(4, kernel_size, 1) # (batch, in_channels, kernel_length, dilation, _, kernel_size)\n\n o = torch.einsum('bildsk,biokl->bolsd', x, kernel)\n o = o + bias.unsqueeze(-1).unsqueeze(-1)\n o = o.contiguous().view(batch, out_channels, -1)\n return o"
},
{
"identifier": "CombSub",
"path": "modules/ddsp/vocoder.py",
"snippet": "class CombSub(torch.nn.Module):\n def __init__(self, \n sampling_rate,\n block_size,\n win_length,\n n_mag_harmonic,\n n_mag_noise,\n n_mels=80):\n super().__init__()\n\n print(' [DDSP Model] Combtooth Subtractive Synthesiser')\n # params\n self.register_buffer(\"sampling_rate\", torch.tensor(sampling_rate))\n self.register_buffer(\"block_size\", torch.tensor(block_size))\n self.register_buffer(\"win_length\", torch.tensor(win_length))\n self.register_buffer(\"window\", torch.hann_window(win_length))\n # Mel2Control\n split_map = {\n 'harmonic_phase': win_length // 2 + 1,\n 'harmonic_magnitude': n_mag_harmonic, \n 'noise_magnitude': n_mag_noise\n }\n self.mel2ctrl = Mel2Control(n_mels, split_map)\n\n def forward(self, mel_frames, f0_frames, initial_phase=None, infer=True, **kwargs):\n '''\n mel_frames: B x n_frames x n_mels\n f0_frames: B x n_frames x 1\n '''\n # exciter phase\n f0 = upsample(f0_frames, self.block_size)\n if infer:\n x = torch.cumsum(f0.double() / self.sampling_rate, axis=1)\n else:\n x = torch.cumsum(f0 / self.sampling_rate, axis=1)\n if initial_phase is not None:\n x += initial_phase.to(x) / 2 / np.pi\n\n x = x - torch.round(x)\n x = x.to(f0)\n\n phase_frames = 2 * np.pi * x[:, ::self.block_size, :]\n\n # parameter prediction\n ctrls = self.mel2ctrl(mel_frames, phase_frames)\n\n \n src_allpass = torch.exp(1.j * np.pi * ctrls['harmonic_phase'])\n src_allpass = torch.cat((src_allpass, src_allpass[:,-1:,:]), 1)\n src_param = torch.exp(ctrls['harmonic_magnitude'])\n noise_param = torch.exp(ctrls['noise_magnitude']) / 128\n \n # combtooth exciter signal\n combtooth = torch.sinc(self.sampling_rate * x / (f0 + 1e-3))\n combtooth = combtooth.squeeze(-1) \n \n # harmonic part filter (using dynamic-windowed LTV-FIR)\n pass\n harmonic = frequency_filter(\n combtooth,\n torch.complex(src_param, torch.zeros_like(src_param)),\n hann_window = True,\n half_width_frames = 1.5 * self.sampling_rate / (f0_frames + 1e-3))\n\n # harmonic part filter (all pass)\n harmonic_spec = torch.stft(\n harmonic,\n n_fft = self.win_length,\n win_length = self.win_length,\n hop_length = self.block_size,\n window = self.window,\n center = True,\n return_complex = True)\n harmonic_spec = harmonic_spec * src_allpass.permute(0, 2, 1)\n\n harmonic = torch.istft(\n harmonic_spec,\n n_fft = self.win_length,\n win_length = self.win_length,\n hop_length = self.block_size,\n window = self.window,\n center = True)\n \n # noise part filter (using constant-windowed LTV-FIR)\n noise = torch.rand_like(harmonic).to(noise_param) * 2 - 1\n noise = frequency_filter( #极高的cpu占用 原因未知--torch2.1\n noise,\n torch.complex(noise_param, torch.zeros_like(noise_param)),\n hann_window = True)\n \n signal = harmonic + noise\n\n return signal, phase_frames, (harmonic, noise)"
},
{
"identifier": "Sins",
"path": "modules/ddsp/vocoder.py",
"snippet": "class Sins(torch.nn.Module):\n def __init__(self, \n sampling_rate,\n block_size,\n win_length,\n n_harmonics,\n n_mag_noise,\n n_mels=80):\n super().__init__()\n\n print(' [DDSP Model] Sinusoids Additive Synthesiser')\n\n # params\n self.register_buffer(\"sampling_rate\", torch.tensor(sampling_rate))\n self.register_buffer(\"block_size\", torch.tensor(block_size))\n self.register_buffer(\"win_length\", torch.tensor(win_length))\n self.register_buffer(\"window\", torch.hann_window(win_length))\n # Mel2Control\n split_map = {\n 'harmonic_phase': win_length // 2 + 1,\n 'amplitudes': n_harmonics,\n 'noise_magnitude': n_mag_noise,\n }\n self.mel2ctrl = Mel2Control(n_mels, split_map)\n\n def forward(self, mel_frames, f0_frames, initial_phase=None, infer=True, max_upsample_dim=32):\n '''\n mel_frames: B x n_frames x n_mels\n f0_frames: B x n_frames x 1\n '''\n # exciter phase\n f0 = upsample(f0_frames, self.block_size)\n if infer:\n x = torch.cumsum(f0.double() / self.sampling_rate, axis=1)\n else:\n x = torch.cumsum(f0 / self.sampling_rate, axis=1)\n if initial_phase is not None:\n x += initial_phase.to(x) / 2 / np.pi \n x = x - torch.round(x)\n x = x.to(f0)\n \n phase = 2 * np.pi * x\n phase_frames = phase[:, ::self.block_size, :]\n \n # parameter prediction\n ctrls = self.mel2ctrl(mel_frames, phase_frames)\n \n src_allpass = torch.exp(1.j * np.pi * ctrls['harmonic_phase'])\n src_allpass = torch.cat((src_allpass, src_allpass[:,-1:,:]), 1)\n amplitudes_frames = torch.exp(ctrls['amplitudes'])/ 128\n noise_param = torch.exp(ctrls['noise_magnitude']) / 128\n \n # sinusoids exciter signal \n amplitudes_frames = remove_above_fmax(amplitudes_frames, f0_frames, self.sampling_rate / 2, level_start = 1)\n n_harmonic = amplitudes_frames.shape[-1]\n level_harmonic = torch.arange(1, n_harmonic + 1).to(phase)\n sinusoids = 0.\n for n in range(( n_harmonic - 1) // max_upsample_dim + 1):\n start = n * max_upsample_dim\n end = (n + 1) * max_upsample_dim\n phases = phase * level_harmonic[start:end]\n amplitudes = upsample(amplitudes_frames[:,:,start:end], self.block_size)\n sinusoids += (torch.sin(phases) * amplitudes).sum(-1)\n \n # harmonic part filter (all pass)\n harmonic_spec = torch.stft(\n sinusoids,\n n_fft = self.win_length,\n win_length = self.win_length,\n hop_length = self.block_size,\n window = self.window,\n center = True,\n return_complex = True)\n harmonic_spec = harmonic_spec * src_allpass.permute(0, 2, 1)\n harmonic = torch.istft(\n harmonic_spec,\n n_fft = self.win_length,\n win_length = self.win_length,\n hop_length = self.block_size,\n window = self.window,\n center = True)\n \n # noise part filter (using constant-windowed LTV-FIR) \n noise = torch.rand_like(harmonic).to(noise_param) * 2 - 1\n noise = frequency_filter(\n noise,\n torch.complex(noise_param, torch.zeros_like(noise_param)),\n hann_window = True)\n \n signal = harmonic + noise\n\n return signal, phase, (harmonic, noise)"
}
] | import numpy as np
import torch
import logging
import torch.nn.functional as F
from torch import nn
from modules.univ_ddsp.block import LVCBlock
from modules.ddsp.vocoder import CombSub, Sins | 4,926 | super(SineGen, self).__init__()
self.sine_amp = sine_amp
self.noise_std = noise_std
self.harmonic_num = harmonic_num
self.dim = self.harmonic_num + 1
self.sampling_rate = samp_rate
self.voiced_threshold = voiced_threshold
def _f02uv(self, f0):
# generate uv signal
uv = torch.ones_like(f0)
uv = uv * (f0 > self.voiced_threshold)
return uv
def _f02sine(self, f0_values, upp):
""" f0_values: (batchsize, length, dim)
where dim indicates fundamental tone and overtones
"""
rad_values = (f0_values / self.sampling_rate).fmod(1.) # %1意味着n_har的乘积无法后处理优化
rand_ini = torch.rand(1, self.dim, device=f0_values.device)
rand_ini[:, 0] = 0
rad_values[:, 0, :] += rand_ini
is_half = rad_values.dtype is not torch.float32
tmp_over_one = torch.cumsum(rad_values.double(), 1) # % 1 #####%1意味着后面的cumsum无法再优化
if is_half:
tmp_over_one = tmp_over_one.half()
else:
tmp_over_one = tmp_over_one.float()
tmp_over_one *= upp
tmp_over_one = F.interpolate(
tmp_over_one.transpose(2, 1), scale_factor=upp,
mode='linear', align_corners=True
).transpose(2, 1)
rad_values = F.interpolate(rad_values.transpose(2, 1), scale_factor=upp, mode='nearest').transpose(2, 1)
tmp_over_one = tmp_over_one.fmod(1.)
diff = F.conv2d(
tmp_over_one.unsqueeze(1), torch.FloatTensor([[[[-1.], [1.]]]]).to(tmp_over_one.device),
stride=(1, 1), padding=0, dilation=(1, 1)
).squeeze(1) # Equivalent to torch.diff, but able to export ONNX
cumsum_shift = (diff < 0).double()
cumsum_shift = torch.cat((
torch.zeros((f0_values.size()[0], 1, self.dim), dtype=torch.double).to(f0_values.device),
cumsum_shift
), dim=1)
sines = torch.sin(torch.cumsum(rad_values.double() + cumsum_shift, dim=1) * 2 * np.pi)
if is_half:
sines = sines.half()
else:
sines = sines.float()
return sines
@torch.no_grad()
def forward(self, f0, upp):
""" sine_tensor, uv = forward(f0)
input F0: tensor(batchsize=1, length, dim=1)
f0 for unvoiced steps should be 0
output sine_tensor: tensor(batchsize=1, length, dim)
output uv: tensor(batchsize=1, length, 1)
"""
f0 = f0.unsqueeze(-1)
fn = torch.multiply(f0, torch.arange(1, self.dim + 1, device=f0.device).reshape((1, 1, -1)))
sine_waves = self._f02sine(fn, upp) * self.sine_amp
uv = (f0 > self.voiced_threshold).float()
uv = F.interpolate(uv.transpose(2, 1), scale_factor=upp, mode='nearest').transpose(2, 1)
noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
noise = noise_amp * torch.randn_like(sine_waves)
sine_waves = sine_waves * uv + noise
return sine_waves
class SourceModuleHnNSF(torch.nn.Module):
""" SourceModule for hn-nsf
SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
add_noise_std=0.003, voiced_threshod=0)
sampling_rate: sampling_rate in Hz
harmonic_num: number of harmonic above F0 (default: 0)
sine_amp: amplitude of sine source signal (default: 0.1)
add_noise_std: std of additive Gaussian noise (default: 0.003)
note that amplitude of noise in unvoiced is decided
by sine_amp
voiced_threshold: threhold to set U/V given F0 (default: 0)
Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
F0_sampled (batchsize, length, 1)
Sine_source (batchsize, length, 1)
noise_source (batchsize, length 1)
uv (batchsize, length, 1)
"""
def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1,
add_noise_std=0.003, voiced_threshold=0):
super(SourceModuleHnNSF, self).__init__()
self.sine_amp = sine_amp
self.noise_std = add_noise_std
# to produce sine waveforms
self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
sine_amp, add_noise_std, voiced_threshold)
# to merge source harmonics into a single excitation
self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
self.l_tanh = torch.nn.Tanh()
def forward(self, x, upp):
sine_wavs = self.l_sin_gen(x, upp)
sine_merge = self.l_tanh(self.l_linear(sine_wavs))
return sine_merge
class DDSP(nn.Module):
def __init__(self,config):
super().__init__()
if config['model_args']['type']=='CombSub':
self.ddsp = CombSub(
sampling_rate=config['audio_sample_rate'],
block_size=config['hop_size'],
win_length=config['win_size'],
n_mag_harmonic=config['model_args']['n_mag_harmonic'],
n_mag_noise=config['model_args']['n_mag_noise'],
n_mels=config['audio_num_mel_bins'])
elif config['model_args']['type']=='Sins':
| # from modules import LVCBlock
LRELU_SLOPE = 0.1
class SineGen(torch.nn.Module):
""" Definition of sine generator
SineGen(samp_rate, harmonic_num = 0,
sine_amp = 0.1, noise_std = 0.003,
voiced_threshold = 0,
flag_for_pulse=False)
samp_rate: sampling rate in Hz
harmonic_num: number of harmonic overtones (default 0)
sine_amp: amplitude of sine-waveform (default 0.1)
noise_std: std of Gaussian noise (default 0.003)
voiced_threshold: F0 threshold for U/V classification (default 0)
flag_for_pulse: this SinGen is used inside PulseGen (default False)
Note: when flag_for_pulse is True, the first time step of a voiced
segment is always sin(np.pi) or cos(0)
"""
def __init__(self, samp_rate, harmonic_num=0,
sine_amp=0.1, noise_std=0.003,
voiced_threshold=0):
super(SineGen, self).__init__()
self.sine_amp = sine_amp
self.noise_std = noise_std
self.harmonic_num = harmonic_num
self.dim = self.harmonic_num + 1
self.sampling_rate = samp_rate
self.voiced_threshold = voiced_threshold
def _f02uv(self, f0):
# generate uv signal
uv = torch.ones_like(f0)
uv = uv * (f0 > self.voiced_threshold)
return uv
def _f02sine(self, f0_values, upp):
""" f0_values: (batchsize, length, dim)
where dim indicates fundamental tone and overtones
"""
rad_values = (f0_values / self.sampling_rate).fmod(1.) # %1意味着n_har的乘积无法后处理优化
rand_ini = torch.rand(1, self.dim, device=f0_values.device)
rand_ini[:, 0] = 0
rad_values[:, 0, :] += rand_ini
is_half = rad_values.dtype is not torch.float32
tmp_over_one = torch.cumsum(rad_values.double(), 1) # % 1 #####%1意味着后面的cumsum无法再优化
if is_half:
tmp_over_one = tmp_over_one.half()
else:
tmp_over_one = tmp_over_one.float()
tmp_over_one *= upp
tmp_over_one = F.interpolate(
tmp_over_one.transpose(2, 1), scale_factor=upp,
mode='linear', align_corners=True
).transpose(2, 1)
rad_values = F.interpolate(rad_values.transpose(2, 1), scale_factor=upp, mode='nearest').transpose(2, 1)
tmp_over_one = tmp_over_one.fmod(1.)
diff = F.conv2d(
tmp_over_one.unsqueeze(1), torch.FloatTensor([[[[-1.], [1.]]]]).to(tmp_over_one.device),
stride=(1, 1), padding=0, dilation=(1, 1)
).squeeze(1) # Equivalent to torch.diff, but able to export ONNX
cumsum_shift = (diff < 0).double()
cumsum_shift = torch.cat((
torch.zeros((f0_values.size()[0], 1, self.dim), dtype=torch.double).to(f0_values.device),
cumsum_shift
), dim=1)
sines = torch.sin(torch.cumsum(rad_values.double() + cumsum_shift, dim=1) * 2 * np.pi)
if is_half:
sines = sines.half()
else:
sines = sines.float()
return sines
@torch.no_grad()
def forward(self, f0, upp):
""" sine_tensor, uv = forward(f0)
input F0: tensor(batchsize=1, length, dim=1)
f0 for unvoiced steps should be 0
output sine_tensor: tensor(batchsize=1, length, dim)
output uv: tensor(batchsize=1, length, 1)
"""
f0 = f0.unsqueeze(-1)
fn = torch.multiply(f0, torch.arange(1, self.dim + 1, device=f0.device).reshape((1, 1, -1)))
sine_waves = self._f02sine(fn, upp) * self.sine_amp
uv = (f0 > self.voiced_threshold).float()
uv = F.interpolate(uv.transpose(2, 1), scale_factor=upp, mode='nearest').transpose(2, 1)
noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
noise = noise_amp * torch.randn_like(sine_waves)
sine_waves = sine_waves * uv + noise
return sine_waves
class SourceModuleHnNSF(torch.nn.Module):
""" SourceModule for hn-nsf
SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
add_noise_std=0.003, voiced_threshod=0)
sampling_rate: sampling_rate in Hz
harmonic_num: number of harmonic above F0 (default: 0)
sine_amp: amplitude of sine source signal (default: 0.1)
add_noise_std: std of additive Gaussian noise (default: 0.003)
note that amplitude of noise in unvoiced is decided
by sine_amp
voiced_threshold: threhold to set U/V given F0 (default: 0)
Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
F0_sampled (batchsize, length, 1)
Sine_source (batchsize, length, 1)
noise_source (batchsize, length 1)
uv (batchsize, length, 1)
"""
def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1,
add_noise_std=0.003, voiced_threshold=0):
super(SourceModuleHnNSF, self).__init__()
self.sine_amp = sine_amp
self.noise_std = add_noise_std
# to produce sine waveforms
self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
sine_amp, add_noise_std, voiced_threshold)
# to merge source harmonics into a single excitation
self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
self.l_tanh = torch.nn.Tanh()
def forward(self, x, upp):
sine_wavs = self.l_sin_gen(x, upp)
sine_merge = self.l_tanh(self.l_linear(sine_wavs))
return sine_merge
class DDSP(nn.Module):
def __init__(self,config):
super().__init__()
if config['model_args']['type']=='CombSub':
self.ddsp = CombSub(
sampling_rate=config['audio_sample_rate'],
block_size=config['hop_size'],
win_length=config['win_size'],
n_mag_harmonic=config['model_args']['n_mag_harmonic'],
n_mag_noise=config['model_args']['n_mag_noise'],
n_mels=config['audio_num_mel_bins'])
elif config['model_args']['type']=='Sins': | self.ddsp = Sins( | 2 | 2023-10-17 13:45:09+00:00 | 8k |
OllieBoyne/FOUND | FOUND/utils/eval_utils.py | [
{
"identifier": "modified_chamf",
"path": "FOUND/utils/pytorch3d.py",
"snippet": "def modified_chamf(x,y, x_lengths=None, y_lengths=None,\n x_normals=None, y_normals=None,\n norm: int = 2):\n \"\"\"\n \tA modified version of pytorch3d.loss.chamfer_distance\n \tto allow for no point or batch reduction and some other changes\n \"\"\"\n\n if not ((norm == 1) or (norm == 2)):\n raise ValueError(\"Support for 1 or 2 norm.\")\n\n x, x_lengths, x_normals = _handle_pointcloud_input(x, x_lengths, x_normals)\n y, y_lengths, y_normals = _handle_pointcloud_input(y, y_lengths, y_normals)\n\n return_normals = x_normals is not None and y_normals is not None\n\n N, P1, D = x.shape\n P2 = y.shape[1]\n\n # Check if inputs are heterogeneous and create a lengths mask.\n is_x_heterogeneous = (x_lengths != P1).any()\n is_y_heterogeneous = (y_lengths != P2).any()\n x_mask = (\n torch.arange(P1, device=x.device)[None] >= x_lengths[:, None]\n ) # shape [N, P1]\n y_mask = (\n torch.arange(P2, device=y.device)[None] >= y_lengths[:, None]\n ) # shape [N, P2]\n\n if y.shape[0] != N or y.shape[2] != D:\n raise ValueError(\"y does not have the correct shape.\")\n\n cham_norm_x = x.new_zeros(())\n cham_norm_y = x.new_zeros(())\n\n x_nn = knn_points(x, y, lengths1=x_lengths, lengths2=y_lengths, norm=norm, K=1)\n y_nn = knn_points(y, x, lengths1=y_lengths, lengths2=x_lengths, norm=norm, K=1)\n\n cham_x = x_nn.dists[..., 0] # (N, P1)\n cham_y = y_nn.dists[..., 0] # (N, P2)\n\n if is_x_heterogeneous:\n cham_x[x_mask] = 0.0\n if is_y_heterogeneous:\n cham_y[y_mask] = 0.0\n\n\n # Gather the normals using the indices and keep only value for k=0\n x_normals_near = knn_gather(y_normals, x_nn.idx, y_lengths)[..., 0, :]\n y_normals_near = knn_gather(x_normals, y_nn.idx, x_lengths)[..., 0, :]\n\n cham_norm_x = torch.abs(\n F.cosine_similarity(x_normals, x_normals_near, dim=2, eps=1e-6)\n )\n cham_norm_y = torch.abs(\n F.cosine_similarity(y_normals, y_normals_near, dim=2, eps=1e-6)\n )\n\n return dict(cham_x=cham_x, cham_y=cham_y, cham_norm_x = cham_norm_x, cham_norm_y=cham_norm_y)"
},
{
"identifier": "modified_sample",
"path": "FOUND/utils/pytorch3d.py",
"snippet": "def modified_sample(meshes: Meshes, \n num_samples: int = 10000,\n return_normals: bool = False,\n return_textures: bool = False,):\n\n \"\"\"Modified version of pytorch3d.ops.sample_points_from_meshes\n that returns references to the faces sampled from\"\"\"\n\n if meshes.isempty():\n raise ValueError(\"Meshes are empty.\")\n\n verts = meshes.verts_packed()\n if not torch.isfinite(verts).all():\n raise ValueError(\"Meshes contain nan or inf.\")\n\n if return_textures and meshes.textures is None:\n raise ValueError(\"Meshes do not contain textures.\")\n\n faces = meshes.faces_packed()\n mesh_to_face = meshes.mesh_to_faces_packed_first_idx()\n num_meshes = len(meshes)\n num_valid_meshes = torch.sum(meshes.valid) # Non empty meshes.\n\n # Initialize samples tensor with fill value 0 for empty meshes.\n samples = torch.zeros((num_meshes, num_samples, 3), device=meshes.device)\n\n # Only compute samples for non empty meshes\n with torch.no_grad():\n areas, _ = mesh_face_areas_normals(verts, faces) # Face areas can be zero.\n max_faces = meshes.num_faces_per_mesh().max().item()\n areas_padded = packed_to_padded(\n areas, mesh_to_face[meshes.valid], max_faces\n ) # (N, F)\n\n # TODO (gkioxari) Confirm multinomial bug is not present with real data.\n sample_face_idxs = areas_padded.multinomial(\n num_samples, replacement=True\n ) # (N, num_samples)\n sample_face_idxs += mesh_to_face[meshes.valid].view(num_valid_meshes, 1)\n\n # Get the vertex coordinates of the sampled faces.\n face_verts = verts[faces]\n v0, v1, v2 = face_verts[:, 0], face_verts[:, 1], face_verts[:, 2]\n\n # Randomly generate barycentric coords.\n w0, w1, w2 = _rand_barycentric_coords(\n num_valid_meshes, num_samples, verts.dtype, verts.device\n )\n\n # Use the barycentric coords to get a point on each sampled face.\n a = v0[sample_face_idxs] # (N, num_samples, 3)\n b = v1[sample_face_idxs]\n c = v2[sample_face_idxs]\n samples[meshes.valid] = w0[:, :, None] * a + w1[:, :, None] * b + w2[:, :, None] * c\n\n if return_normals:\n # Initialize normals tensor with fill value 0 for empty meshes.\n # Normals for the sampled points are face normals computed from\n # the vertices of the face in which the sampled point lies.\n normals = torch.zeros((num_meshes, num_samples, 3), device=meshes.device)\n vert_normals = (v1 - v0).cross(v2 - v1, dim=1)\n vert_normals = vert_normals / vert_normals.norm(dim=1, p=2, keepdim=True).clamp(\n min=sys.float_info.epsilon\n )\n vert_normals = vert_normals[sample_face_idxs]\n normals[meshes.valid] = vert_normals\n\n if return_textures:\n # fragment data are of shape NxHxWxK. Here H=S, W=1 & K=1.\n pix_to_face = sample_face_idxs.view(len(meshes), num_samples, 1, 1) # NxSx1x1\n bary = torch.stack((w0, w1, w2), dim=2).unsqueeze(2).unsqueeze(2) # NxSx1x1x3\n # zbuf and dists are not used in `sample_textures` so we initialize them with dummy\n dummy = torch.zeros(\n (len(meshes), num_samples, 1, 1), device=meshes.device, dtype=torch.float32\n ) # NxSx1x1\n fragments = MeshFragments(\n pix_to_face=pix_to_face, zbuf=dummy, bary_coords=bary, dists=dummy\n )\n textures = meshes.sample_textures(fragments) # NxSx1x1xC\n textures = textures[:, :, 0, 0, :] # NxSxC\n\n out = {}\n\n out['verts'] = samples\n if return_normals: out['normals'] = normals\n if return_textures: out['textures'] = textures\n\n # return original faces\n out['face_idxs'] = sample_face_idxs\n\n return out"
},
{
"identifier": "Renderer",
"path": "FOUND/utils/renderer.py",
"snippet": "class Renderer(nn.Module):\n\n\tdef __init__(self, device='cuda', image_size=(256, 256),\n\t\t\t\t bin_size=None, z_clip_value=None,\n\t\t\t\t max_faces_per_bin=None, cam_params: dict = None,\n\t\t\t\t MAX_BATCH_SIZE=10,\n\t\t\t\t **kwargs):\n\n\t\tsuper().__init__()\n\n\t\tself.MAX_BATCH_SIZE = MAX_BATCH_SIZE\n\n\t\tif isinstance(image_size, int):\n\t\t\timage_size = (image_size, image_size)\n\n\t\tself.image_size = image_size\n\n\t\tself.img_raster_settings = RasterizationSettings(\n\t\t\timage_size=image_size, blur_radius=0.,\n\t\t\tfaces_per_pixel=1, max_faces_per_bin=max_faces_per_bin,\n\t\t\tbin_size=bin_size, z_clip_value=z_clip_value)\n\n\t\t# Rasterization settings for silhouette rendering\n\t\tsigma = 1e-6\n\t\tself.raster_settings_silhouette = RasterizationSettings(\n\t\t\timage_size=image_size,\n\t\t\tblur_radius=np.log(1. / 1e-4 - 1.) * sigma,\n\t\t\tfaces_per_pixel=10, max_faces_per_bin=max_faces_per_bin,\n\t\t\tbin_size=bin_size\n\t\t)\n\n\t\tself.rasterizer = MeshRasterizer(raster_settings=self.img_raster_settings)\n\t\tself.sil_rasterizer = MeshRasterizer(raster_settings=self.raster_settings_silhouette)\n\n\t\t# Shaders\n\t\tself.img_shader = SoftPhongShader(device=device)\n\t\tself.norm_shader = NormalShader()\n\t\tself.sil_shader = SoftSilhouetteShader()\n\n\t\t# default lighting\n\t\tself.lights = AmbientLights(device=device)\n\n\t\tself.camera_params = {}\n\t\tif cam_params is not None:\n\t\t\t# Multiple camera intrinsics not currently supported\n\t\t\tf = torch.tensor([[cam_params['focal_length']]]).to(device) # [N x 1]\n\t\t\tpp = torch.tensor(cam_params['principal_point']).unsqueeze(0).to(device) # [N x 2]\n\t\t\tself.camera_params = dict(focal_length=f, principal_point=pp,\n\t\t\t\t\t\t\t\t\t in_ndc=False, image_size=torch.tensor(image_size).unsqueeze(0).to(device))\n\n\tdef forward(self, meshes, R: torch.Tensor, T: torch.Tensor, keypoints=None,\n\t\t\t\trender_normals=True, render_rgb=True, render_sil=True,\n\t\t\t\tmask_out_faces=None, return_cameras=False, camera_params=None,\n\t\t\t\tnormals_fmt='blender', one_view_per_mesh=False):\n\t\t\"\"\"\n\t\tCan receive various number of 'views' (size of R) and meshes (size of 'meshes')\n\t\tN input views, 1 mesh -> render N views of 1 mesh\n\t\tN input views, N mesh -> render one view per mesh (only if one_view_per_mesh is True)\n\t\tN input views, M mesh -> render N views of M meshes\n\n\t\tRender modes:\n\t\t\t- render_rgb: render RGB image\n\t\t\t- render_normals: render surface normals\n\t\t\t- render_sil: render silhouette\n\t\t\t- keypoints: project 3D keypoints onto image\n\n\t\t:param R: [N x 4 x 4]\n\t\t:param T: [N x 4 x 4]\n\t\t:param keypoints: optional [M x P x 3] keypoints to render\n\t\t:param mask_out_faces: [M x F] faces per mesh to optionally remove from seg & normal\n\t\t:param camera_params: Optional per-camera focal length & principal point\n\t\t:return:\n\n\t\tCurrently does not support M > 1 rendering to M images.\n\t\t\"\"\"\n\n\t\tif camera_params is None:\n\t\t\tcamera_params = self.camera_params\n\n\t\tN = R.shape[0] # number of views\n\t\tM = len(meshes) # number of meshes\n\n\t\tif M > 1 and (N == M):\n\t\t\tassert one_view_per_mesh, \"For N == M, M > 1, requires one_view_per_mesh=True parameter.\"\n\n\t\t\tout_shape_rgb = (N, *self.image_size, 3)\n\t\t\tout_shape_single = (N, *self.image_size)\n\t\t\tbatch_size = N\n\n\t\t# in the case M != N for M > 1, want to render all N views for each mesh\n\t\telif M != N and M > 1:\n\t\t\tmeshes = meshes.extend(N) # produce a mesh for each view\n\t\t\tR = torch.cat([R] * M, dim=0)\n\t\t\tT = torch.cat([T] * M, dim=0) # produce R, T for each mesh\n\n\t\t\tout_shape_rgb = (N, M, *self.image_size, 3)\n\t\t\tout_shape_single = (N, M, *self.image_size)\n\t\t\tbatch_size = N * M\n\n\t\t# in the case M = 1, N >= 1, render N views of 1 mesh\n\t\telse:\n\t\t\tmeshes = meshes.extend(N) # produce a mesh for each view\n\t\t\tout_shape_rgb = (N, *self.image_size, 3)\n\t\t\tout_shape_single = (N, *self.image_size)\n\n\t\tcameras = PerspectiveCameras(device=meshes.device, R=R, T=T, **camera_params)\n\n\t\tout = dict()\n\t\t_frags = None\n\t\tnormals = None\n\t\tif render_rgb or render_normals:\n\t\t\tfragments = self.rasterizer(meshes, cameras=cameras)\n\t\t\t_frags = fragments # Store fragments for mask out faces\n\n\t\t\tif render_rgb:\n\t\t\t\tout['rgb'] = self.img_shader(fragments, meshes, cameras=cameras, lights=self.lights)[..., :3].reshape(\n\t\t\t\t\tout_shape_rgb)\n\n\t\t\tif render_normals:\n\t\t\t\tnormals = self.norm_shader(fragments, meshes, cameras=cameras)\n\n\t\tif render_sil:\n\t\t\tfragments_sil = self.sil_rasterizer(meshes, cameras=cameras)\n\t\t\tif _frags is None: _frags = fragments_sil # Store fragments for mask out faces\n\n\t\t\tsil = self.sil_shader(fragments_sil, meshes, cameras=cameras)\n\t\t\tout['sil'] = sil[..., -1].reshape(out_shape_single) # return just alpha channel (silhouette)\n\n\t\t# Apply face masking of FIND model\n\t\tif (render_rgb or render_sil or render_normals) and mask_out_faces is not None:\n\t\t\t# get foremost face for each pixel in correct format\n\t\t\tpix_to_face = get_padded_pix_to_face(_frags.pix_to_face[..., 0], meshes).reshape(out_shape_single)\n\n\t\t\tfor n in range(N):\n\t\t\t\tmask_pix = torch.isin(pix_to_face[n], mask_out_faces)\n\n\t\t\t\tif render_rgb:\n\t\t\t\t\tout['rgb'][n][mask_pix] = 1. # set pixels to white\n\n\t\t\t\tif render_sil:\n\t\t\t\t\tout['sil'][n, mask_pix] = 0.\n\n\t\t\t\tif render_normals:\n\t\t\t\t\tnormals.mask[n] *= ~mask_pix # does not work for certain batch types\n\n\t\tif render_normals:\n\t\t\t# Also return rgb and xyz of normals\n\t\t\tout['norm_rgb'] = normals.to_rgb(format=normals_fmt, mask_value=.5).reshape(out_shape_rgb)\n\t\t\tout['norm_xyz'] = normals.to_xyz(format=normals_fmt).reshape(out_shape_rgb)\n\n\t\tif keypoints is not None:\n\t\t\tkps_2d = cameras.transform_points_screen(keypoints, image_size=self.image_size)[..., :2]\n\t\t\tout['kps'] = kps_2d\n\n\t\tif return_cameras:\n\t\t\tout['cameras'] = cameras\n\n\t\treturn out"
},
{
"identifier": "view_from",
"path": "FOUND/utils/renderer.py",
"snippet": "def view_from(view_kw='topdown', dist=.35):\n\tkws = ['topdown', 'side1', 'side2', 'toes', '45', '60']\n\n\tif isinstance(view_kw, str):\n\t\tview_kw = [view_kw]\n\n\tN = len(view_kw)\n\tR, T = torch.empty((N, 3, 3)), torch.empty((N, 3))\n\tfor n, v in enumerate(view_kw):\n\t\tassert v in kws or isinstance(v, int), f\"View description `{view_kw}` not understood\"\n\n\t\tdist, elev, azim, point = dist, 0, 0, ((0, 0, 0),)\n\t\tif v == 'topdown': elev = 0\n\t\tif v == 'side1': elev = 90\n\t\tif v == 'side2': elev, azim = -90, 180\n\t\tif v == 'toes': point = ((0.1, 0, 0),); dist = 0.1\n\t\tif isinstance(v, int):\n\t\t\telev = v\n\n\t\t_R, _T = look_at_view_transform(dist=dist, elev=elev, azim=azim, up=((1, 0, 0),), at=point)\n\n\t\tR[n] = _R\n\t\tT[n] = _T\n\n\treturn R, T"
},
{
"identifier": "produce_grid",
"path": "FOUND/utils/vis.py",
"snippet": "def produce_grid(entries):\n\t\"\"\"Receives list of lists, containing several possible data types. Converts them all to the correct RGB uint8 format, combines into a single image, and returns.\n\n\tAccepted formats:\n\tTensor, any device, >= 2 dims (will take first element in all above last 3), >= 3 channels (will take first 3) OR 1 channel (segmentation)\n\tnp.ndarray (same rules as tensor)\n\tNone - fill with blank\n\n\tPads all rows with black images if not enough elements\n\t\"\"\"\n\n\tif not isinstance(entries[0], list):\n\t\tentries = [entries] # convert to 2D list of lists\n\n\tM = max(map(len, entries))\n\n\tH, W = None, None\n\n\trows = []\n\tfor j, raw_row in enumerate(entries):\n\t\trow = []\n\t\tfor i, entry in enumerate(raw_row):\n\t\t\tif entry is None:\n\t\t\t\tentry = np.zeros((H, W, 3), dtype=np.uint8)\n\n\t\t\tentry = tens2rgb(entry)\n\n\t\t\tassert entry.ndim >= 2, f\"Arrays for grid must have >= 2 dimensions. Entry ({i}, {j}) has shape {entry.shape}.\"\n\t\t\tentry = reduce_ax(entry, 3) # reduce dimensions to just get a single image\n\n\t\t\t# handle segmentations\n\t\t\tif entry.shape[-1] > 4: # if last axis is clearly a width/height axis\n\t\t\t\tentry = seg_to_rgb(reduce_ax(entry, 2))\n\n\t\t\tentry = entry[..., :3] # only take first 3 channels\n\n\t\t\tif i == j == 0:\n\t\t\t\tH, W, _ = entry.shape\n\n\t\t\tentry = entry.astype(np.uint8)\n\t\t\trow.append(entry)\n\n\t\tfor i in range(M - len(raw_row)):\n\t\t\trow.append(np.zeros((H, W, 3), dtype=np.uint8)) # pad each row with black images if not enough items\n\n\t\t# stack the row images together\n\t\ttry:\n\t\t\trows.append(np.hstack(row))\n\t\texcept:\n\t\t\traise ValueError(\n\t\t\t\tf\"Could not combine row {j}, of raw shapes: {[x.shape for x in raw_row]}. Attempted conversion to shapes: {[x.shape for x in row]}\")\n\n\treturn np.vstack(rows)"
},
{
"identifier": "put_text",
"path": "FOUND/utils/vis.py",
"snippet": "def put_text(img, string, x, y, width, height, backg=(0,0,0), scale=1, vertical=False):\n\t\"\"\"Place text on an image, with top left corner (x,y), and a given width height.\n\tWhite text, black background fixed.\n\tVertical flag used to rotate 90 degrees anticlockwise\"\"\"\n\n\tout = img.copy()\n\tout[y:y+height, x:x+width] = get_text(string.split('\\n'), width, height, scale=scale, backg=backg, vertical=vertical)\n\treturn out"
},
{
"identifier": "colourbar",
"path": "FOUND/utils/vis.py",
"snippet": "def colourbar(width, height, colours, points=(0, 1), orientation='vertical'):\n\t\"\"\"Produce a colour bar of size width x height.\n\tAt each point in `points`, the colour at point along the horizontal/vertical (depending on `orientation`)\n\tmust be the corresponding colour in `colour`. Between points, linearly interpolate.\"\"\"\n\n\tassert len(colours) == len(points), \"Colours to points must be 1-1 correspondence for colourbar\"\n\tcolours = np.array(colours)\n\n\timg = np.zeros((height, width, 3))\n\tfor (c0, p0, c1, p1) in zip(colours, points, colours[1:], points[1:]):\n\t\tif orientation == 'vertical':\n\t\t\tv0, v1 = int(p0*height), int(p1*height)\n\t\t\timg[v0: v1] = c0[None, None, :] + np.linspace(0, 1, v1-v0)[:, None, None] * (c1 - c0)[None, None, :]\n\n\t\telse:\n\t\t\th0, h1 = int(p0 * width), int(p1 * width)\n\t\t\timg[:, h0:h1] = c0 + np.linspace(0, 1, h1 - h0) * (c1 - c0)\n\n\treturn img.astype(np.uint8)"
}
] | from pytorch3d.renderer import TexturesVertex
from pytorch3d.structures import Meshes
from multiprocessing import Process
from prettytable import PrettyTable
from .pytorch3d import modified_chamf, modified_sample
from .renderer import Renderer, view_from
from .vis import produce_grid, put_text, colourbar
from matplotlib import pyplot as plt
import os
import trimesh
import cv2
import multiprocessing as mp
import torch
import torch.nn.functional as F
import numpy as np
import json | 6,852 |
fracs = (err - vmin) / (vmax - vmin)
rgba = (colmin + fracs.unsqueeze(-1) * (colmax - colmin)).to(err.device)
rgba = torch.clip(rgba, min=0, max=1)
rgba[torch.any(torch.isnan(rgba), dim=-1)] = colnan
return rgba
class Reporter:
"""Receive statements, on exit print all and save all to file"""
def __init__(self, out_file_loc):
self.lines = []
self.out_file_loc = out_file_loc
def __call__(self, line):
self.lines.append(line)
def __enter__(self, *args):
return self
def __exit__(self, *args):
[*map(print, self.lines)]
with open(self.out_file_loc, 'w') as outfile:
outfile.writelines([s + '\n' for s in self.lines])
def get_max_fit(exp_dir):
"""Search in an experiment directory for the fit_xx.obj with the highest value"""
f = lambda s: -1 if 'fit_' not in s else int(s.split('fit_')[1].split('.obj')[0])
return max(os.listdir(exp_dir), key=f)
def cutoff_slice_FIND(mesh, max_heel_height = 0.04, cutoff_height = 0.1):
"""Similar mesh slicing method to FIND: identify heel keypoint, slice off 1cm above"""
X, Y, Z = mesh.vertices.T
Xma = np.ma.array(X, mask= Z >= max_heel_height)
heel_idx = np.ma.argmin(Xma)
slice_height = min(Z[heel_idx] + cutoff_height, Z.max() - 5e-3)
return mesh.slice_plane([0, 0, slice_height], [0, 0, -1], cap=False)
def get_loghist(x, nbins):
hist, bins = np.histogram(x, bins=nbins)
logbins = np.logspace(np.log10(bins[0]),np.log10(bins[-1]),len(bins))
return dict(x=x, bins=logbins)
def eval_exp(exp_dir, render=True):
results = {} # return results as errors
if not any('fit_' in f for f in os.listdir(exp_dir)):
print(f"No fits for {exp_dir}, skipping...")
return
pred_obj_loc = os.path.join(exp_dir, get_max_fit(exp_dir))
# load settings to get folder
opts_loc = os.path.join(exp_dir, 'opts.json')
if not os.path.isfile(opts_loc):
print(f"No opts for {exp_dir}, skipping...")
return
with open(opts_loc) as infile:
settings = json.load(infile)
# assume GT OBJ loc is
# (1) saved in <data_folder>/mesh.obj if <data_folder> given
if 'data_folder' in settings:
gt_obj_loc = os.path.join(settings['data_folder'], 'mesh.obj')
# (2) saved in <exp_dir>/gt_mesh.obj otherwise
else:
gt_obj_loc = os.path.join(exp_dir, 'gt_mesh.obj')
eval_dir = os.path.join(exp_dir, 'eval')
os.makedirs(eval_dir, exist_ok=True)
with open(gt_obj_loc) as infile:
d = trimesh.exchange.obj.load_obj(infile, process=False)
gt_mesh_trimesh = trimesh.Trimesh(**d)
with open(pred_obj_loc) as infile:
d = trimesh.exchange.obj.load_obj(infile, process=False)
pred_mesh_trimesh = trimesh.Trimesh(**d)
# pre-process meshes, w/ cutoff
# Same method as used for Foot3D here for slicing GT
gt_mesh_trimesh = cutoff_slice_FIND(gt_mesh_trimesh)
if settings.get('model', 'FIND') == 'FIND':
# slice FIND faces
FIND_cutoff_surface = np.load(os.path.join(settings['find_pth'], 'templ_masked_faces.npy'))
FIND_sole_faces = np.load(os.path.join(settings['find_pth'], 'templ_sole_faces.npy'))
FIND_sole_verts = np.unique(np.ravel(pred_mesh_trimesh.faces[FIND_sole_faces])) # all vertices considered part of the sole
sole_vert_positions = pred_mesh_trimesh.vertices[FIND_sole_verts] # save sole vertex positions to refind them after mesh pre-processing
pred_mesh_trimesh.update_faces(~np.isin(np.arange(pred_mesh_trimesh.faces.shape[0]), FIND_cutoff_surface))
pred_mesh_trimesh = cutoff_slice_FIND(pred_mesh_trimesh)
# define a mask
# want to be able to define a mask on the FIND model, so that errors of verts in this mask aren't considered real -> pred, but are considered in reverse
# (for sole verts, unfair to count the error on them, but likewise incorrect to just remove them all, especially at the boundary)
# recalculate sole vertices
FIND_sole_vert_idxs = np.argwhere(np.all(pred_mesh_trimesh.vertices[:, None, :] == sole_vert_positions[None, ...], axis=-1))[:, 0]
FIND_sole_vertex_mask = np.isin(np.arange(pred_mesh_trimesh.vertices.shape[0]), FIND_sole_vert_idxs) # mask of which vertices correspond to the sole
FIND_sole_faces_mask = np.any(FIND_sole_vertex_mask[pred_mesh_trimesh.faces], axis=-1) # mask of which faces are in sole
else:
pred_mesh_trimesh = cutoff_slice_FIND(pred_mesh_trimesh)
# Convert to PyTorch3D
p3d_from_trimesh = lambda mesh: Meshes(verts=torch.from_numpy(np.asarray(mesh.vertices)[None, ...]).float(),
faces=torch.from_numpy(np.asarray(mesh.faces)[None, ...])).to(device)
gt_mesh = p3d_from_trimesh(gt_mesh_trimesh)
pred_mesh = p3d_from_trimesh(pred_mesh_trimesh)
# Sample vertices uniformly from mesh, returning vertex position, normal, and original face/vert idxs
| """Evaluate the performance of a fitted mesh"""
device = 'cuda'
def eval_metrics(arr, cutoffs=[5, 7.5, 11.25, 22.5, 30]):
"""Given a 1d array, return mean, median, rmse,
and % of values less than each in `cutoffs`"""
assert arr.ndim == 1, "eval_metrics requires 1D array"
out = dict(mean = arr.mean(), median = np.median(arr), rmse = (arr ** 2).mean() **.5,
cutoffs = [(arr < i).mean() for i in cutoffs])
return out
def err_to_colour(err: torch.Tensor, vmin:float=None, vmax:float=None, colmin=(0, 1, 0), colmax=(1, 0, 0), nan_colour=(0.3, 0.3, 0.3)):
"""Convert a tensor of errors (...) to an RGB colour scale (..., 3).
Linearly interpolate so that err of vmin -> colmin, err of vmax -> colmax
if vmin and vmax not given, take min and max of err
If any nan's given, set their colour to nan_colour
"""
ndim = err.ndim
colmin = torch.tensor(colmin)[(None,)*ndim].to(err.device) # expand colmin to [..., 3]
colmax = torch.tensor(colmax)[(None,)*ndim].to(err.device)
colnan = torch.tensor(nan_colour)[(None,)*ndim].to(err.device)
vmin = err.nanmin() if vmin is None else vmin
vmax = err.nanmax() if vmax is None else vmax
fracs = (err - vmin) / (vmax - vmin)
rgba = (colmin + fracs.unsqueeze(-1) * (colmax - colmin)).to(err.device)
rgba = torch.clip(rgba, min=0, max=1)
rgba[torch.any(torch.isnan(rgba), dim=-1)] = colnan
return rgba
class Reporter:
"""Receive statements, on exit print all and save all to file"""
def __init__(self, out_file_loc):
self.lines = []
self.out_file_loc = out_file_loc
def __call__(self, line):
self.lines.append(line)
def __enter__(self, *args):
return self
def __exit__(self, *args):
[*map(print, self.lines)]
with open(self.out_file_loc, 'w') as outfile:
outfile.writelines([s + '\n' for s in self.lines])
def get_max_fit(exp_dir):
"""Search in an experiment directory for the fit_xx.obj with the highest value"""
f = lambda s: -1 if 'fit_' not in s else int(s.split('fit_')[1].split('.obj')[0])
return max(os.listdir(exp_dir), key=f)
def cutoff_slice_FIND(mesh, max_heel_height = 0.04, cutoff_height = 0.1):
"""Similar mesh slicing method to FIND: identify heel keypoint, slice off 1cm above"""
X, Y, Z = mesh.vertices.T
Xma = np.ma.array(X, mask= Z >= max_heel_height)
heel_idx = np.ma.argmin(Xma)
slice_height = min(Z[heel_idx] + cutoff_height, Z.max() - 5e-3)
return mesh.slice_plane([0, 0, slice_height], [0, 0, -1], cap=False)
def get_loghist(x, nbins):
hist, bins = np.histogram(x, bins=nbins)
logbins = np.logspace(np.log10(bins[0]),np.log10(bins[-1]),len(bins))
return dict(x=x, bins=logbins)
def eval_exp(exp_dir, render=True):
results = {} # return results as errors
if not any('fit_' in f for f in os.listdir(exp_dir)):
print(f"No fits for {exp_dir}, skipping...")
return
pred_obj_loc = os.path.join(exp_dir, get_max_fit(exp_dir))
# load settings to get folder
opts_loc = os.path.join(exp_dir, 'opts.json')
if not os.path.isfile(opts_loc):
print(f"No opts for {exp_dir}, skipping...")
return
with open(opts_loc) as infile:
settings = json.load(infile)
# assume GT OBJ loc is
# (1) saved in <data_folder>/mesh.obj if <data_folder> given
if 'data_folder' in settings:
gt_obj_loc = os.path.join(settings['data_folder'], 'mesh.obj')
# (2) saved in <exp_dir>/gt_mesh.obj otherwise
else:
gt_obj_loc = os.path.join(exp_dir, 'gt_mesh.obj')
eval_dir = os.path.join(exp_dir, 'eval')
os.makedirs(eval_dir, exist_ok=True)
with open(gt_obj_loc) as infile:
d = trimesh.exchange.obj.load_obj(infile, process=False)
gt_mesh_trimesh = trimesh.Trimesh(**d)
with open(pred_obj_loc) as infile:
d = trimesh.exchange.obj.load_obj(infile, process=False)
pred_mesh_trimesh = trimesh.Trimesh(**d)
# pre-process meshes, w/ cutoff
# Same method as used for Foot3D here for slicing GT
gt_mesh_trimesh = cutoff_slice_FIND(gt_mesh_trimesh)
if settings.get('model', 'FIND') == 'FIND':
# slice FIND faces
FIND_cutoff_surface = np.load(os.path.join(settings['find_pth'], 'templ_masked_faces.npy'))
FIND_sole_faces = np.load(os.path.join(settings['find_pth'], 'templ_sole_faces.npy'))
FIND_sole_verts = np.unique(np.ravel(pred_mesh_trimesh.faces[FIND_sole_faces])) # all vertices considered part of the sole
sole_vert_positions = pred_mesh_trimesh.vertices[FIND_sole_verts] # save sole vertex positions to refind them after mesh pre-processing
pred_mesh_trimesh.update_faces(~np.isin(np.arange(pred_mesh_trimesh.faces.shape[0]), FIND_cutoff_surface))
pred_mesh_trimesh = cutoff_slice_FIND(pred_mesh_trimesh)
# define a mask
# want to be able to define a mask on the FIND model, so that errors of verts in this mask aren't considered real -> pred, but are considered in reverse
# (for sole verts, unfair to count the error on them, but likewise incorrect to just remove them all, especially at the boundary)
# recalculate sole vertices
FIND_sole_vert_idxs = np.argwhere(np.all(pred_mesh_trimesh.vertices[:, None, :] == sole_vert_positions[None, ...], axis=-1))[:, 0]
FIND_sole_vertex_mask = np.isin(np.arange(pred_mesh_trimesh.vertices.shape[0]), FIND_sole_vert_idxs) # mask of which vertices correspond to the sole
FIND_sole_faces_mask = np.any(FIND_sole_vertex_mask[pred_mesh_trimesh.faces], axis=-1) # mask of which faces are in sole
else:
pred_mesh_trimesh = cutoff_slice_FIND(pred_mesh_trimesh)
# Convert to PyTorch3D
p3d_from_trimesh = lambda mesh: Meshes(verts=torch.from_numpy(np.asarray(mesh.vertices)[None, ...]).float(),
faces=torch.from_numpy(np.asarray(mesh.faces)[None, ...])).to(device)
gt_mesh = p3d_from_trimesh(gt_mesh_trimesh)
pred_mesh = p3d_from_trimesh(pred_mesh_trimesh)
# Sample vertices uniformly from mesh, returning vertex position, normal, and original face/vert idxs | gt_sample_dict = modified_sample(gt_mesh, num_samples=10_000, return_normals=True) | 1 | 2023-10-24 11:46:42+00:00 | 8k |
RobertCsordas/moe | layers/transformer/relative_preln_kvmem_transformer.py | [
{
"identifier": "ActivationFunction",
"path": "layers/transformer/transformer.py",
"snippet": "class TransformerEncoderLayer(torch.nn.Module):\nclass TransformerDecoderLayer(torch.nn.Module):\nclass TransformerDecoderBase(torch.nn.Module):\n class State:\nclass TransformerEncoder(torch.nn.Module):\nclass TransformerDecoder(TransformerDecoderBase):\nclass TransformerBase(torch.nn.Module):\nclass Transformer(TransformerBase):\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,\n attention_dropout=0):\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None) -> torch.Tensor:\n def reset_parameters(self):\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,\n attention_dropout=0):\n def forward(self, tgt: torch.Tensor, memory: torch.Tensor, tgt_mask: Optional[AttentionMask] = None,\n memory_key_padding_mask: Optional[torch.Tensor] = None,\n full_target: Optional[torch.Tensor] = None, pos_offset: int = 0) -> torch.Tensor:\n def reset_parameters(self):\n def __init__(self, d_model: int):\n def create_state(self, batch_size: int, max_length: int, device: torch.device) -> State:\n def one_step_forward(self, state: State, data: torch.Tensor, *args, **kwargs):\n def __init__(self, layer, n_layers: int, d_model, *args, **kwargs):\n def forward(self, data: torch.Tensor, *args, **kwargs):\n def __init__(self, layer, n_layers: int, d_model: int, *args, **kwargs):\n def forward(self, data: torch.Tensor, *args, **kwargs):\ndef TransformerEncoderWithLayer(layer: Type[torch.nn.Module] = TransformerEncoderLayer):\ndef TransformerDecoderWithLayer(layer: Type[torch.nn.Module] = TransformerDecoderLayer):\n def __init__(self, encoder: torch.nn.Module, decoder: torch.nn.Module):\n def forward(self, src: torch.Tensor, tgt: torch.Tensor, tgt_mask: Optional[torch.Tensor] = None,\n src_mask: Optional[AttentionMask] = None):\n def generate_square_subsequent_mask(sz: int, device: torch.device) -> torch.Tensor:\n def __init__(self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 6, num_decoder_layers: int = 6,\n dim_feedforward: int = 2048, dropout: float = 0.1, activation: ActivationFunction = F.relu,\n encoder_layer=TransformerEncoderWithLayer(), decoder_layer=TransformerDecoderWithLayer(),\n attention_dropout: float = 0):"
},
{
"identifier": "FixedRelativeMultiheadAttention",
"path": "layers/transformer/multi_head_relative_pos_attention.py",
"snippet": "def shift(posmat: torch.Tensor) -> torch.Tensor:\n def __init__(self, state_size: int, n_heads: int, dropout: float, projection_size: Optional[int] = None):\n def get_attention_scores(self, mask: Optional[torch.Tensor],\n q_content: torch.Tensor, k_content: torch.Tensor,\n q_pos: torch.Tensor, k_pos: torch.Tensor,\n pos_offset: int, ar_gate: Optional[torch.Tensor] = None) -> torch.Tensor:\n def _attention(self, mask: Optional[torch.Tensor],\n q_content: torch.Tensor, k_content: torch.Tensor,\n q_pos: torch.Tensor, k_pos: torch.Tensor,\n v: torch.Tensor, pos_offset: int,\n ar_gate: Optional[torch.Tensor] = None) -> [torch.Tensor, torch.Tensor]:\n def _get_pos_subset(self, pos_encoding: torch.Tensor, length: int, offset: int) -> torch.Tensor:\n def plot(self, options: Dict[str, Any]) -> Dict[str, Any]:\n def __init__(self, state_size: int, n_heads: int, dropout: float = 0.0, input_size: Optional[int] = None,\n projection_size: Optional[int] = None, pos_clamp: Optional[int] = None,\n test_pos_clamp: Optional[int] = None):\n def _create_buffer(self, max_len: int, clamp: Optional[int] = None):\n def get_pos(self, l: int, offset: int) -> torch.Tensor:\n def __init__(self, state_size: int, n_heads: int, dropout: float = 0.0, global_pos_bias: bool = True,\n global_content_bias: bool = True, input_size: Optional[int] = None, absolute_gate: bool = False,\n projection_size: Optional[int] = None, output_size: Optional[int] = None, pos_clamp: Optional[int] = None,\n test_pos_clamp: Optional[int] = None):\n def add_head_specific_bias(self, data: torch.Tensor, bias: Optional[torch.Tensor]) -> torch.Tensor:\n def forward(self, curr_state: torch.Tensor, attend_to: torch.Tensor, mask: Optional[AttentionMask],\n pos_offset: Optional[int] = None, need_weights: bool = False):\n def reset_parameters(self):\nclass RelativeAttentionBase(MultiHeadAttentionBase):\nclass FixedRelativeMultiheadAttentionBase(RelativeAttentionBase):\nclass FixedRelativeMultiheadAttention(AttentionMergeMixin, FixedRelativeMultiheadAttentionBase):"
},
{
"identifier": "reset_prenorm_params",
"path": "layers/transformer/transformer_preln.py",
"snippet": "def reset_prenorm_params(m: torch.nn.Module, n_layers: int):\n for layer in m.modules():\n if isinstance(layer, torch.nn.Linear):\n torch.nn.init.trunc_normal_(layer.weight)\n with torch.no_grad():\n layer.weight.mul_(math.sqrt(2 / (n_layers * layer.weight.shape[1])) / layer.weight.std())\n if layer.bias is not None:\n torch.nn.init.zeros_(layer.bias)\n elif isinstance(layer, torch.nn.LayerNorm):\n torch.nn.init.ones_(layer.weight)\n torch.nn.init.zeros_(layer.bias)"
},
{
"identifier": "LowrankApproximate2Layer",
"path": "layers/lowrank_approximate_2layer.py",
"snippet": "class LowrankApproximate2Layer(LoggingLayer, torch.nn.Module):\n def __init__(self, n_dim: int, n_keys: Union[int, Tuple[int, int]], n_heads: int = 1, knn: int = 32,\n dropout: float = 0, k_dim: Optional[int] = None, sparse: bool = False, stochastic: bool = False,\n custom_init: int = 0, weight_scale: float = 1.0,\n slice_values: bool = False, head_merge_topk: bool = False, load_balance: bool = False,\n query_proj: bool = True, randomize_indices: bool = False, dropout_mode: str = \"none\",\n query_bias: bool = False, approx: bool = False, factorize: bool = False, full_key: bool = False,\n key_redundancy_factor: int = 1, two_stage: bool = False, factors: Optional[List[int]] = None,\n head_exclusive: bool = False, activation: ActivationFunction = F.relu):\n\n super().__init__()\n\n # global parameters\n self.input_dim = n_dim\n self.output_dim = n_dim\n self.k_dim = k_dim or n_dim\n self.v_dim = n_dim\n n_keys = n_keys[0] if isinstance(n_keys, (tuple, list)) and len(n_keys) == 1 else n_keys\n self.key_sizes = [n_keys, n_keys] if isinstance(n_keys, int) else n_keys\n self.size = int(np.prod(self.key_sizes))\n self.heads = n_heads\n self.knn = knn\n self.stochastic = stochastic\n self.slice_values = slice_values\n self.head_merge_topk = head_merge_topk\n self.load_balance = load_balance\n self.dropout_mode = dropout_mode\n self.approx = approx\n self.factorize = factorize\n self.full_key = full_key\n self.two_stage = two_stage\n self.head_exclusive = head_exclusive\n self.custom_init = custom_init\n self.activation = activation\n\n self.no_knn = all([k <= self.knn for k in self.key_sizes]) or self.knn == 0\n\n if self.factorize:\n if factors is not None:\n if np.prod(factors) != self.knn:\n raise ValueError(\"{factors} is not a factorization of {self.knn}\")\n self.k = factors\n else:\n self.k = U.decompose_factors(self.knn, 2)\n print(f\"Approximate2Layer: Using factorization: {self.k}\")\n\n\n assert self.dropout_mode in [\"none\", \"early\", \"late\", \"weight\", \"score\"]\n\n assert self.k_dim >= 2 and self.k_dim % 2 == 0\n\n assert (not slice_values) or (self.v_dim % self.heads == 0), \"Value dimension must be divisible by the num of heads.\"\n\n # dropout\n self.query_dropout = dropout\n\n if self.dropout_mode == \"early\":\n self.query_dropout = math.sqrt(self.query_dropout)\n\n # initialize keys / values\n self.real_vdim = (self.v_dim // n_heads) if slice_values else self.v_dim\n self.values = torch.nn.EmbeddingBag(self.size, self.real_vdim , mode='sum', sparse=sparse)\n\n self.keys = torch.nn.ParameterList([\n torch.nn.Parameter(torch.empty((self.heads, s * key_redundancy_factor, self.k_dim // (1 if self.full_key else 2)))) for s in self.key_sizes\n ])\n\n if self.two_stage:\n self.full_keys = torch.nn.Parameter(torch.empty((self.size, self.k_dim)))\n\n if self.head_exclusive:\n self.head_scales = torch.nn.Parameter(torch.zeros(self.size, self.heads+1))\n\n initializer = self.get_custom_init()\n for k in self.keys:\n initializer(k, std=n_dim ** -0.5 * weight_scale)\n\n if self.two_stage:\n initializer(self.full_keys, std=n_dim ** -0.5 * weight_scale)\n\n if custom_init in {0,1}:\n initializer(self.values.weight, std=n_dim ** -0.5 * weight_scale)\n elif custom_init in {2,4}:\n initializer(self.values.weight, std=(knn * self.heads) ** -0.5 * weight_scale)\n elif custom_init in {3,5}:\n initializer(self.values.weight, std=self.size ** -0.5 * weight_scale)\n else:\n raise ValueError(f\"Invalid custom_init: {custom_init}\")\n\n self.query_proj = torch.nn.Linear(n_dim, n_dim * n_heads, bias=query_bias) if query_proj else None\n\n self.register_buffer(\"usage_count\", torch.zeros(self.size, dtype=torch.long), persistent=False)\n\n self.register_buffer(\"seq\", torch.arange(self.knn, dtype=torch.long), persistent=False)\n self.register_buffer(\"head_shift\", (torch.arange(self.heads, dtype=torch.float) * (self.key_sizes[0] / n_heads)).long(), persistent=False)\n\n self.log_count = 0\n self.randomize_indices = randomize_indices and self.heads > 1\n\n def get_custom_init(self):\n return torch.nn.init.normal_ if self.custom_init in {0, 4, 5} else U.init.trunc_normal_\n\n def topk(self, x: torch.Tensor, dim: int = -1, k: Optional[int] = None) -> SparseAddress:\n k = k or self.knn\n\n if k >= x.shape[dim] or k == 0:\n d = [1] * x.ndim\n d[dim] = -1\n return SparseAddress(\n x, torch.arange(x.shape[dim], device=x.device, dtype=torch.long).view(*d).expand_as(x))\n\n if self.approx:\n x = x.view(*x.shape[:-1], k, -1)\n scores, ind = x.max(-1)\n return SparseAddress(scores, self.seq[:k] * x.shape[-1] + ind)\n else:\n return SparseAddress(*x.contiguous().topk(k, dim=dim, sorted=False))\n\n def merge_sub_address(self, addr1: SparseAddress, addr2: SparseAddress) -> SparseAddress:\n # cartesian product on best candidate keys\n addr = self.combine_address_product(addr1, addr2)\n\n if self.dropout_mode == \"late\":\n addr.scores = F.dropout(addr.scores, p=self.query_dropout, training=self.training)\n\n # select best scores with associated indices\n addr2 = self.topk(addr.scores)\n addr2.indices = addr.indices.gather(-1, addr2.indices)\n\n return addr2\n\n def get_score(self, scores1: torch.Tensor, scores2: torch.Tensor, indices: torch.Tensor) -> torch.Tensor:\n i1 = indices // self.key_sizes[-1]\n i2 = indices % self.key_sizes[-1]\n\n # return scores1[:, i1] + scores2[:, i2]\n return scores1.gather(-1, i1) + scores2.gather(-1, i2)\n\n @property\n def is_load_balane(self):\n return self.training and self.load_balance\n\n def index_combine(self, indices1: torch.Tensor, indices2: torch.Tensor) -> torch.Tensor:\n # Must be in sync with get_scores and get_dense_score\n return indices1 * self.key_sizes[-1] + indices2\n\n def combine_address_simple(self, addr1: SparseAddress, addr2: SparseAddress) -> SparseAddress:\n return SparseAddress(\n addr1.scores + addr2.scores,\n self.index_combine(addr1.indices, addr2.indices)\n )\n\n def combine_address_product(self, addr1: SparseAddress, addr2: SparseAddress) -> SparseAddress:\n return SparseAddress(\n (addr1.scores.unsqueeze(-1) + addr2.scores.unsqueeze(-2)).flatten(start_dim=-2),\n self.index_combine(addr1.indices.unsqueeze(-1), addr2.indices.unsqueeze(-2)).flatten(start_dim=-2)\n )\n\n def score_project(self, query: torch.Tensor, key1: torch.Tensor, key2: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n assert query.dim() == 2 and query.size(1) == self.k_dim\n half = self.k_dim // 2\n\n # split query for product quantization\n if self.full_key:\n q1 = q2 = query\n else:\n q1 = query[:, :half] # (bs,half)\n q2 = query[:, half:] # (bs,half)\n\n # # compute indices with associated scores\n # if head_index % 2 == 1:\n # q1, q2 = q2, q1\n\n if self.dropout_mode == \"weight\":\n key1 = F.dropout(key1, p=self.query_dropout, training=self.training)\n key2 = F.dropout(key2, p=self.query_dropout, training=self.training)\n\n scores1 = F.linear(q1, key1, bias=None) # (bs,n_keys)\n scores2 = F.linear(q2, key2, bias=None) # (bs,n_keys)\n\n if self.dropout_mode == \"early\":\n scores1 = F.dropout(scores1, p=self.query_dropout, training=self.training)\n scores2 = F.dropout(scores2, p=self.query_dropout, training=self.training)\n\n return scores1, scores2\n\n def _get_indices(self, query: torch.Tensor, key1: torch.Tensor, key2: torch.Tensor, head_index: int) -> SparseAddress:\n \"\"\"\n Generate scores and indices for a specific head.\n \"\"\"\n scores1, scores2 = self.score_project(query, key1, key2)\n\n if self.factorize:\n addr1 = self.topk(scores1, k=self.k[0])\n addr2 = self.topk(scores2, k=self.k[1])\n\n addr1.indices = addr1.indices % self.key_sizes[0]\n addr2.indices = addr2.indices % self.key_sizes[1]\n\n res = self.combine_address_product(addr1, addr2)\n elif not self.approx:\n with torch.no_grad():\n addr1 = self.topk(scores1) # (bs,knn)\n addr2 = self.topk(scores2) # (bs,knn)\n\n addr1.indices = addr1.indices % self.key_sizes[0]\n addr2.indices = addr2.indices % self.key_sizes[1]\n\n res = self.merge_sub_address(addr1, addr2)\n\n res.scores = self.get_score(scores1, scores2, res.indices)\n else:\n addr1 = self.topk(scores1) # (bs,knn)\n addr2 = SparseAddress(*torch.max(scores2, -1, keepdim=True))\n\n # This order should be equivalent to the above but faster\n # addr1 = SparseAddress(*torch.max(scores1, -1, keepdim=True))\n # addr2 = self.topk(scores2)\n\n addr1.indices = addr1.indices % self.key_sizes[0]\n addr2.indices = addr2.indices % self.key_sizes[1]\n\n res = self.combine_address_simple(addr1, addr2)\n\n if self.head_exclusive:\n scale = torch.softmax(self.head_scales[res.indices], -1)[..., head_index]\n res.scores = res.scores * scale\n\n # assert (res2.scores == res.scores).all()\n # assert (res2.indices == res.indices).all()\n\n if self.is_load_balane:\n rind1 = torch.randint(0, self.key_sizes[0], addr1.indices.shape, dtype=addr1.indices.dtype, device=addr1.indices.device)\n rind2 = torch.randint(0, self.key_sizes[1], addr1.indices.shape, dtype=addr1.indices.dtype, device=addr1.indices.device)\n\n scores1_c = scores1.scatter(-1, addr1.indices, torch.zeros_like(addr1.indices, dtype=scores1.dtype))\n scores2_c = scores1.scatter(-1, addr2.indices, torch.zeros_like(addr2.indices, dtype=scores2.dtype))\n\n addr = SparseAddress(\n scores1_c.gather(-1, rind1) + scores2_c.gather(-1, rind2),\n self.index_combine(rind1, rind2)\n )\n\n res = SparseAddress.cat([res, addr], -1)\n\n return res\n\n def get_head_specific_queries(self, query: torch.Tensor) -> List[torch.Tensor]:\n if self.query_proj is not None:\n queries = query.view(-1, self.heads, self.k_dim).unbind(-2)\n else:\n query = query.view(-1, self.k_dim)\n queries = [query] * self.heads\n\n return queries\n\n def get_indices(self, query: torch.Tensor) -> SparseAddress:\n \"\"\"\n Generate scores and indices.\n \"\"\"\n\n queries = self.get_head_specific_queries(query)\n\n outputs = [self._get_indices(queries[i], self.keys[0][i], self.keys[1][i], i) for i in range(self.heads)]\n # for i in range(self.heads):\n # outputs[i].indices = (outputs[i].indices + (self.n_keys // self.heads) * i) % self.size\n\n if self.randomize_indices:\n for i in range(self.heads - 1):\n # outputs[i].indices = self.ind_perm[i][outputs[i].indices]\n outputs[i].indices = (outputs[i].indices + self.head_shift[i+1]) % self.size\n\n addr = SparseAddress.stack(outputs, -2)\n\n if self.head_merge_topk:\n addr2 = self.topk(addr.scores)\n addr2.indices = addr.indices.gather(-1, addr2.indices)\n addr = addr2\n\n return addr\n\n def get_dense_score(self, query: torch.Tensor, key1: torch.Tensor, key2: torch.Tensor) -> torch.Tensor:\n scores1, scores2 = self.score_project(query, key1, key2)\n return (scores1.unsqueeze(-1) + scores2.unsqueeze(-2)).flatten(-2)\n\n def get_indices_dense(self, query: torch.Tensor) -> torch.Tensor:\n queries = self.get_head_specific_queries(query)\n outputs = [self.get_dense_score(queries[i], self.keys[0][i], self.keys[1][i]) for i in range(self.heads)]\n return sum(outputs)\n\n def forward_dense(self, query: torch.Tensor) -> torch.Tensor:\n assert not self.two_stage\n\n scores = self.get_indices_dense(query)\n\n scores = self.activation(scores) # (bs*heads,knn)\n if self.dropout_mode == \"score\":\n scores = F.dropout(scores, p=self.query_dropout, training=self.training)\n\n return F.linear(scores, self.values.weight.T)\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Read from the memory.\n \"\"\"\n # input dimensions\n assert input.shape[-1] == self.input_dim\n prefix_shape = input.shape[:-1]\n bs = np.prod(prefix_shape)\n\n # compute query\n query = self.query_proj(input) if self.query_proj is not None else input\n # query = query.contiguous().view(bs * self.heads, self.k_dim) # (bs*heads,k_dim)\n\n if self.no_knn:\n return self.forward_dense(query).view_as(input)\n\n # retrieve indices and scores\n addr = self.get_indices(query) # (bs*heads,knn)\n\n # if self.head_exclusive:\n # scales = torch.softmax(self.head_scales[addr.indices], -1)\n # addr.scores = addr.scores * scales\n\n if not self.head_merge_topk:\n k = self.knn * (2 if self.is_load_balane else 1)\n addr = addr.view(-1, k) if self.slice_values else addr.view(bs, -1)\n\n # weighted sum of values\n if self.two_stage:\n real_keys = self.full_keys[addr.indices]\n addr.scores = torch.einsum(\"btd,bd->bt\", real_keys, input.flatten(0, -2)) * torch.sigmoid(addr.scores)\n\n addr.scores = self.activation(addr.scores) # (bs*heads,knn)\n if self.dropout_mode == \"score\":\n addr.scores = F.dropout(addr.scores, p=self.query_dropout, training=self.training)\n\n output = self.values(addr.indices, per_sample_weights=addr.scores.type_as(self.values.weight)) # (bs,v_dim)\n\n # if self.load_balance:\n # self.usage_count.index_add_(0, addr.indices.flatten(), torch.ones(1, dtype=torch.long, device=addr.indices.device).expand(addr.indices.nelement()))\n\n if self.training:\n self.usage_count.index_add_(0, addr.indices.flatten(), torch.ones(1, dtype=torch.long, device=addr.indices.device).expand(addr.indices.nelement()))\n self.log_count += 1\n if self.log_count % 100 == 0:\n n_used = (self.usage_count > 0).long().sum()\n self.log(\"n_nonzero\", n_used)\n self.log(\"n_zero\", self.size - n_used)\n self.usage_count.fill_(0)\n\n # reshape output\n return output.view_as(input) # (...,v_dim)"
}
] | from typing import Optional, List, Union, Tuple
from .transformer import ActivationFunction
from .multi_head_relative_pos_attention import FixedRelativeMultiheadAttention, AttentionMask
from .transformer_preln import reset_prenorm_params
from layers.lowrank_approximate_2layer import LowrankApproximate2Layer
import torch
import torch.nn
import torch.nn.functional as F
import math | 6,356 |
class PrelnRelativeKVMemTransformerEncoderLayer(torch.nn.Module):
def __init__(self, d_model, nhead, n_keys: Union[int, Tuple[int, int]], n_layers: int, dim_feedforward=2048,
dropout=0.1, activation: ActivationFunction = F.relu, attention_dropout=0,
test_pos_clamp: Optional[int] = None, pkm_heads: int = 1, pkm_stochastic: bool = True,
pkm_custom_init: int = 0, pkm_slice_values: bool = False,
pkm_knn: int = 32, linproj: bool = False, head_merge_topk: bool = False, load_balance: bool = True,
kvmem_dropout: str = "none", kvmem_randomize_indices: bool = False, kvmem_query_bias: bool = False,
standard_parallel: bool = False, approx_topk: bool = False, factorize: bool = False,
full_key: bool = False, key_redundancy_factor: int = 1, two_stage: bool = False,
factors: Optional[List[int]] = None, head_exclusive: bool = False,
head_projection_size: Optional[int] = None):
super().__init__()
self.self_attn = FixedRelativeMultiheadAttention(
d_model, nhead, dropout=attention_dropout, test_pos_clamp=test_pos_clamp,
projection_size=head_projection_size)
self.pkm = LowrankApproximate2Layer(
d_model, n_keys, pkm_heads, stochastic=pkm_stochastic, custom_init=pkm_custom_init,
weight_scale=math.sqrt(2.0 / n_layers), slice_values=pkm_slice_values, knn=pkm_knn,
head_merge_topk=head_merge_topk, load_balance=load_balance, dropout=dropout,
query_proj=linproj, randomize_indices=kvmem_randomize_indices, dropout_mode=kvmem_dropout,
query_bias=kvmem_query_bias, approx=approx_topk, factorize=factorize, full_key=full_key,
key_redundancy_factor=key_redundancy_factor, two_stage=two_stage, factors=factors,
head_exclusive=head_exclusive, activation=activation)
self.norm1 = torch.nn.LayerNorm(d_model)
self.norm2 = torch.nn.LayerNorm(d_model)
self.dropout = torch.nn.Dropout(dropout)
self.activation = activation
self.standard_parallel = standard_parallel
reset_prenorm_params(self, n_layers)
if self.standard_parallel:
self.linear1 = torch.nn.Linear(d_model, dim_feedforward, bias=False)
self.linear2 = torch.nn.Linear(dim_feedforward, d_model, bias=False)
initializer = self.pkm.get_custom_init()
s_real = dim_feedforward + self.pkm.size
# s_real = dim_feedforward + self.pkm.heads * self.pkm.knn
initializer(self.linear2.weight, std=math.sqrt(2 / (n_layers * s_real)))
initializer(self.pkm.values.weight, std=math.sqrt(2 / (n_layers * s_real)))
initializer(self.linear1.weight, std=math.sqrt(2 / (n_layers * d_model)))
if self.pkm.two_stage:
initializer(self.pkm.full_keys, std=math.sqrt(2 / (n_layers * d_model)))
|
class PrelnRelativeKVMemTransformerEncoderLayer(torch.nn.Module):
def __init__(self, d_model, nhead, n_keys: Union[int, Tuple[int, int]], n_layers: int, dim_feedforward=2048,
dropout=0.1, activation: ActivationFunction = F.relu, attention_dropout=0,
test_pos_clamp: Optional[int] = None, pkm_heads: int = 1, pkm_stochastic: bool = True,
pkm_custom_init: int = 0, pkm_slice_values: bool = False,
pkm_knn: int = 32, linproj: bool = False, head_merge_topk: bool = False, load_balance: bool = True,
kvmem_dropout: str = "none", kvmem_randomize_indices: bool = False, kvmem_query_bias: bool = False,
standard_parallel: bool = False, approx_topk: bool = False, factorize: bool = False,
full_key: bool = False, key_redundancy_factor: int = 1, two_stage: bool = False,
factors: Optional[List[int]] = None, head_exclusive: bool = False,
head_projection_size: Optional[int] = None):
super().__init__()
self.self_attn = FixedRelativeMultiheadAttention(
d_model, nhead, dropout=attention_dropout, test_pos_clamp=test_pos_clamp,
projection_size=head_projection_size)
self.pkm = LowrankApproximate2Layer(
d_model, n_keys, pkm_heads, stochastic=pkm_stochastic, custom_init=pkm_custom_init,
weight_scale=math.sqrt(2.0 / n_layers), slice_values=pkm_slice_values, knn=pkm_knn,
head_merge_topk=head_merge_topk, load_balance=load_balance, dropout=dropout,
query_proj=linproj, randomize_indices=kvmem_randomize_indices, dropout_mode=kvmem_dropout,
query_bias=kvmem_query_bias, approx=approx_topk, factorize=factorize, full_key=full_key,
key_redundancy_factor=key_redundancy_factor, two_stage=two_stage, factors=factors,
head_exclusive=head_exclusive, activation=activation)
self.norm1 = torch.nn.LayerNorm(d_model)
self.norm2 = torch.nn.LayerNorm(d_model)
self.dropout = torch.nn.Dropout(dropout)
self.activation = activation
self.standard_parallel = standard_parallel
reset_prenorm_params(self, n_layers)
if self.standard_parallel:
self.linear1 = torch.nn.Linear(d_model, dim_feedforward, bias=False)
self.linear2 = torch.nn.Linear(dim_feedforward, d_model, bias=False)
initializer = self.pkm.get_custom_init()
s_real = dim_feedforward + self.pkm.size
# s_real = dim_feedforward + self.pkm.heads * self.pkm.knn
initializer(self.linear2.weight, std=math.sqrt(2 / (n_layers * s_real)))
initializer(self.pkm.values.weight, std=math.sqrt(2 / (n_layers * s_real)))
initializer(self.linear1.weight, std=math.sqrt(2 / (n_layers * d_model)))
if self.pkm.two_stage:
initializer(self.pkm.full_keys, std=math.sqrt(2 / (n_layers * d_model)))
| def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None, | 1 | 2023-10-16 11:26:45+00:00 | 8k |
enkeejunior1/Diffusion-Pullback | src/models/improved_ddpm_old/unet.py | [
{
"identifier": "convert_module_to_f16",
"path": "src/models/improved_ddpm_old/fp16_util.py",
"snippet": "def convert_module_to_f16(l):\n \"\"\"\n Convert primitive modules to float16.\n \"\"\"\n if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):\n l.weight.data = l.weight.data.half()\n if l.bias is not None:\n l.bias.data = l.bias.data.half()"
},
{
"identifier": "convert_module_to_f32",
"path": "src/models/improved_ddpm_old/fp16_util.py",
"snippet": "def convert_module_to_f32(l):\n \"\"\"\n Convert primitive modules to float32, undoing convert_module_to_f16().\n \"\"\"\n if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):\n l.weight.data = l.weight.data.float()\n if l.bias is not None:\n l.bias.data = l.bias.data.float()"
},
{
"identifier": "checkpoint",
"path": "src/models/improved_ddpm_old/nn.py",
"snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n\n :param func: the function to evaluate.\n :param inputs: the argument sequence to pass to `func`.\n :param params: a sequence of parameters `func` depends on but does not\n explicitly take as arguments.\n :param flag: if False, disable gradient checkpointing.\n \"\"\"\n if flag:\n args = tuple(inputs) + tuple(params)\n return CheckpointFunction.apply(func, len(inputs), *args)\n else:\n return func(*inputs)"
},
{
"identifier": "conv_nd",
"path": "src/models/improved_ddpm_old/nn.py",
"snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")"
},
{
"identifier": "linear",
"path": "src/models/improved_ddpm_old/nn.py",
"snippet": "def linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)"
},
{
"identifier": "avg_pool_nd",
"path": "src/models/improved_ddpm_old/nn.py",
"snippet": "def avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1d(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2d(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")"
},
{
"identifier": "zero_module",
"path": "src/models/improved_ddpm_old/nn.py",
"snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module"
},
{
"identifier": "normalization",
"path": "src/models/improved_ddpm_old/nn.py",
"snippet": "def normalization(channels):\n \"\"\"\n Make a standard normalization layer.\n\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNorm32(32, channels)"
},
{
"identifier": "timestep_embedding",
"path": "src/models/improved_ddpm_old/nn.py",
"snippet": "def timestep_embedding(timesteps, embedding_dim, device=None):\n \"\"\"\n This matches the implementation in Denoising Diffusion Probabilistic Models:\n From Fairseq.\n Build sinusoidal embeddings.\n This matches the implementation in tensor2tensor, but differs slightly\n from the description in Section 3.5 of \"Attention Is All You Need\".\n \"\"\"\n assert len(timesteps.shape) == 1\n\n device = timesteps.device if device is None else device\n timesteps = timesteps.to(device)\n\n half_dim = embedding_dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = th.exp(th.arange(half_dim, device=timesteps.device, dtype=th.float32) * -emb)\n emb = timesteps.float()[:, None] * emb[None, :]\n emb = th.cat([th.sin(emb), th.cos(emb)], dim=1)\n if embedding_dim % 2 == 1: # zero pad\n emb = th.nn.functional.pad(emb, (0, 1, 0, 0))\n emb = emb.to(device)\n return emb"
}
] | import time
import torchvision.utils as tvu
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, reduce, repeat, einsum
from abc import abstractmethod
from .fp16_util import convert_module_to_f16, convert_module_to_f32
from .nn import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
) | 3,935 |
if i == max_iter - 1:
print('last convergence : ', convergence)
u, s, vT = u.view(-1, c_o*w_o*h_o).T.detach(), s.sqrt().detach(), v.view(-1, c_i*w_i*h_i).detach()
return u, s, vT
##############
# submodules #
##############
class AttentionPool2d(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(
self,
spacial_dim: int,
embed_dim: int,
num_heads_channels: int,
output_dim: int = None,
):
super().__init__()
self.positional_embedding = nn.Parameter(
th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5
)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1) # NC(HW)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
else:
x = layer(x)
return x
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
upsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
if use_conv:
self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1)
def forward(self, x):
assert x.shape[1] == self.channels
if self.dims == 3:
x = F.interpolate(
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(
dims, self.channels, self.out_channels, 3, stride=stride, padding=1
)
else:
assert self.channels == self.out_channels
| """
Codebase for "Improved Denoising Diffusion Probabilistic Models".
"""
#########
# model #
#########
class UNetModel(nn.Module):
"""
The full UNet model with attention and timestep embedding.
:param in_channels: channels in the input Tensor.
:param model_channels: base channel count for the model.
:param out_channels: channels in the output Tensor.
:param num_res_blocks: number of residual blocks per downsample.
:param attention_resolutions: a collection of downsample rates at which
attention will take place. May be a set, list, or tuple.
For example, if this contains 4, then at 4x downsampling, attention
will be used.
:param dropout: the dropout probability.
:param channel_mult: channel multiplier for each level of the UNet.
:param conv_resample: if True, use learned convolutions for upsampling and
downsampling.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param num_classes: if specified (as an int), then this model will be
class-conditional with `num_classes` classes.
:param use_checkpoint: use gradient checkpointing to reduce memory usage.
:param num_heads: the number of attention heads in each attention layer.
:param num_heads_channels: if specified, ignore num_heads and instead use
a fixed channel width per attention head.
:param num_heads_upsample: works with num_heads to set a different number
of heads for upsampling. Deprecated.
:param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
:param resblock_updown: use residual blocks for up/downsampling.
:param use_new_attention_order: use a different attention pattern for potentially
increased efficiency.
"""
def __init__(
self,
args,
image_size,
in_channels,
model_channels,
out_channels,
num_res_blocks,
attention_resolutions,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
num_classes=None,
use_checkpoint=False,
use_fp16=False,
num_heads=1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
resblock_updown=False,
use_new_attention_order=False,
):
super().__init__()
if num_heads_upsample == -1:
num_heads_upsample = num_heads
self.image_size = image_size
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
self.num_res_blocks = num_res_blocks
self.attention_resolutions = attention_resolutions
self.dropout = dropout
self.channel_mult = channel_mult
self.conv_resample = conv_resample
self.num_classes = num_classes
self.use_checkpoint = use_checkpoint
self.dtype = th.float16 if use_fp16 else th.float32
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
time_embed_dim = model_channels * 4
self.time_embed = nn.Sequential(
linear(model_channels, time_embed_dim),
nn.SiLU(),
linear(time_embed_dim, time_embed_dim),
)
if self.num_classes is not None:
self.label_emb = nn.Embedding(num_classes, time_embed_dim)
ch = input_ch = int(channel_mult[0] * model_channels)
self.input_blocks = nn.ModuleList(
[TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
)
self._feature_size = ch
input_block_chans = [ch]
ds = 1
for level, mult in enumerate(channel_mult):
for _ in range(num_res_blocks):
layers = [
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=int(mult * model_channels),
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = int(mult * model_channels)
if ds in attention_resolutions:
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
)
)
self.input_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
input_block_chans.append(ch)
if level != len(channel_mult) - 1:
out_ch = ch
self.input_blocks.append(
TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
down=True,
)
if resblock_updown
else Downsample(
ch, conv_resample, dims=dims, out_channels=out_ch
)
)
)
ch = out_ch
input_block_chans.append(ch)
ds *= 2
self._feature_size += ch
self.middle_block = TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
),
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
)
self._feature_size += ch
self.output_blocks = nn.ModuleList([])
for level, mult in list(enumerate(channel_mult))[::-1]:
for i in range(num_res_blocks + 1):
ich = input_block_chans.pop()
layers = [
ResBlock(
ch + ich,
time_embed_dim,
dropout,
out_channels=int(model_channels * mult),
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = int(model_channels * mult)
if ds in attention_resolutions:
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads_upsample,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
)
)
if level and i == num_res_blocks:
out_ch = ch
layers.append(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
up=True,
)
if resblock_updown
else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
)
ds //= 2
self.output_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
self.out = nn.Sequential(
normalization(ch),
nn.SiLU(),
zero_module(conv_nd(dims, input_ch, out_channels, 3, padding=1)),
)
############
# Pullback #
############
self.device = args.device
self.dtype = args.dtype
def convert_to_fp16(self):
"""
Convert the torso of the model to float16.
"""
self.input_blocks.apply(convert_module_to_f16)
self.middle_block.apply(convert_module_to_f16)
self.output_blocks.apply(convert_module_to_f16)
def convert_to_fp32(self):
"""
Convert the torso of the model to float32.
"""
self.input_blocks.apply(convert_module_to_f32)
self.middle_block.apply(convert_module_to_f32)
self.output_blocks.apply(convert_module_to_f32)
def forward(
self, x, t, u=None, return_sigma=False, **kwargs
):
"""
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
:param t: a 1-D batch of t.
:param y: an [N] Tensor of labels, if class-conditional.
:return: an [N x C x ...] Tensor of outputs.
"""
# assert (y is not None) == (
# self.num_classes is not None
# ), "must specify y if and only if the model is class-conditional"
t = t.unsqueeze(0) if len(t.shape) == 0 else t
t = t.to(device=self.device, dtype=self.dtype)
hs = []
emb = self.time_embed(timestep_embedding(t, self.model_channels))
# if self.num_classes is not None:
# assert y.shape == (x.shape[0],)
# emb = emb + self.label_emb(y)
h = x.type(self.dtype)
for module in self.input_blocks:
h = module(h, emb)
hs.append(h)
h = self.middle_block(h, emb)
if u is not None:
h = h + u.view(-1, *h[-1].shape)
for module in self.output_blocks:
h = th.cat([h, hs.pop()], dim=1)
h = module(h, emb)
h = h.type(x.dtype)
h = self.out(h)
return h
# et, logvar_learned = th.split(h, h.shape[1] // 2, dim=1)
# if return_sigma:
# return et, logvar_learned
# else:
# return et
def get_h(
self, x, t, **kwargs
):
if isinstance(t, int):
t = th.tensor([t]).to(self.device)
elif isinstance(t, th.Tensor):
t = t.unsqueeze(0) if len(t.shape) == 0 else t
else:
raise ValueError('t must be int or torch.Tensor')
emb = self.time_embed(timestep_embedding(t, self.model_channels))
h = x.type(self.dtype)
for module in self.input_blocks:
h = module(h, emb)
h = self.middle_block(h, emb)
return h
def inv_jac_xt(
self, x=None, t=None, op=None, block_idx=None,
u=None, perturb_h=1e-1,
):
# original h
h = self.get_h(
x=x, t=t, op=op, block_idx=block_idx,
)
# get number of h space directions
if len(u.shape) > 1:
pca_rank = u.size(1)
h = h.repeat(pca_rank, 1, 1, 1).detach()
u = rearrange(u, '(c w h) k -> k c w h', c=h.size(1), w=h.size(2), h=h.size(3))
else:
pca_rank = 1
u = u.view(*h.shape)
# perturb h
perturbed_h = h + perturb_h * u
# get corresponding x direction (argmin_v perturbed_h - f(xt + v))
jacx = lambda x : (perturbed_h - self.get_h(
x=x, t=t, op=op, block_idx=block_idx,
)).view(pca_rank, -1).norm(dim=-1)
jac = th.autograd.functional.jacobian(jacx, x)
# normalize direction
vT = normalize_wrt_batch(jac).view(pca_rank, -1)
return vT
def local_encoder_pullback_xt(
self, x=None, t=None, op=None, block_idx=None, pca_rank=16, chunk_size=25,
min_iter=10, max_iter=100, convergence_threshold=1e-3,
):
'''
Args
- sample : zt
- op : ['down', 'mid', 'up']
- block_idx : op == down, up : [0,1,2,3], op == mid : [0]
- pooling : ['pixel-sum', 'channel-sum', 'single-channel', 'multiple-channel']
Returns
- h : hidden feature
'''
# necessary variables
num_chunk = pca_rank // chunk_size if pca_rank % chunk_size == 0 else pca_rank // chunk_size + 1
get_h = lambda x : self.get_h(
x, t=t, op=op, block_idx=block_idx,
)
h_shape = get_h(x).shape
print('h_shape : ', h_shape)
c_i, w_i, h_i = x.size(1), x.size(2), x.size(3)
c_o, w_o, h_o = h_shape[1], h_shape[2], h_shape[3]
a = th.tensor(0., device=x.device)
# Algorithm 1
vT = th.randn(c_i*w_i*h_i, pca_rank, device=x.device)
vT, _ = th.linalg.qr(vT)
v = vT.T
v = v.view(-1, c_i, w_i, h_i)
for i in range(max_iter):
v_prev = v.detach().cpu().clone()
u = []
time_s = time.time()
v_buffer = list(v.chunk(num_chunk))
for vi in v_buffer:
# g = lambda a : get_h(x + a*vi.unsqueeze(0) if vi.size(0) == v.size(-1) else x + a*vi)
g = lambda a : get_h(x + a*vi)
ui = th.func.jacfwd(g, argnums=0, has_aux=False, randomness='error')(a)
u.append(ui.detach().cpu().clone())
time_e = time.time()
print('single v jacfwd t ==', time_e - time_s)
u = th.cat(u, dim=0)
u = u.to(x.device)
# time_s = time.time()
# g = lambda a : get_h(x + a*v)
# u = th.func.jacfwd(g, argnums=0, has_aux=False, randomness='error')(a)
# time_e = time.time()
# print('single vi jacfwd t ==', time_e - time_s)
g = lambda x : einsum(u, get_h(x), 'b c w h, i c w h -> b')
v_ = th.autograd.functional.jacobian(g, x)
v_ = v_.view(-1, c_i*w_i*h_i)
_, s, v = th.linalg.svd(v_, full_matrices=False)
v = v.view(-1, c_i, w_i, h_i)
u = u.view(-1, c_o, w_o, h_o)
convergence = th.dist(v_prev, v.detach().cpu())
print(f'power method : {i}-th step convergence : ', convergence)
if th.allclose(v_prev, v.detach().cpu(), atol=convergence_threshold) and (i > min_iter):
print('reach convergence threshold : ', convergence)
break
if i == max_iter - 1:
print('last convergence : ', convergence)
u, s, vT = u.view(-1, c_o*w_o*h_o).T.detach(), s.sqrt().detach(), v.view(-1, c_i*w_i*h_i).detach()
return u, s, vT
##############
# submodules #
##############
class AttentionPool2d(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(
self,
spacial_dim: int,
embed_dim: int,
num_heads_channels: int,
output_dim: int = None,
):
super().__init__()
self.positional_embedding = nn.Parameter(
th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5
)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1) # NC(HW)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
else:
x = layer(x)
return x
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
upsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
if use_conv:
self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1)
def forward(self, x):
assert x.shape[1] == self.channels
if self.dims == 3:
x = F.interpolate(
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(
dims, self.channels, self.out_channels, 3, stride=stride, padding=1
)
else:
assert self.channels == self.out_channels | self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) | 5 | 2023-10-21 04:08:44+00:00 | 8k |
NVIDIA-Omniverse/IsaacSim-Automator | src/tests/deployer.test.py | [
{
"identifier": "c",
"path": "src/python/config.py",
"snippet": ""
},
{
"identifier": "Deployer",
"path": "src/python/deployer.py",
"snippet": "class Deployer:\n def __init__(self, params, config):\n self.tf_outputs = {}\n self.params = params\n self.config = config\n self.existing_behavior = None\n\n # save original params so we can recreate command line\n self.input_params = params.copy()\n\n # convert \"in_china\"\n self.params[\"in_china\"] = {\"yes\": True, \"no\": False, \"auto\": False}[\n self.params[\"in_china\"]\n ]\n\n # create state directory if it doesn't exist\n os.makedirs(self.config[\"state_dir\"], exist_ok=True)\n\n # print complete command line\n if self.params[\"debug\"]:\n click.echo(colorize_info(\"* Command:\\n\" + self.recreate_command_line()))\n\n def __del__(self):\n # update meta info\n self.save_meta()\n\n def save_meta(self):\n \"\"\"\n Save command parameters in json file, just in case\n \"\"\"\n\n meta_file = (\n f\"{self.config['state_dir']}/{self.params['deployment_name']}/meta.json\"\n )\n\n data = {\n \"command\": self.recreate_command_line(separator=\" \"),\n \"input_params\": self.input_params,\n \"params\": self.params,\n \"config\": self.config,\n }\n\n Path(meta_file).parent.mkdir(parents=True, exist_ok=True)\n Path(meta_file).write_text(json.dumps(data, indent=4))\n\n if self.params[\"debug\"]:\n click.echo(colorize_info(f\"* Meta info saved to '{meta_file}'\"))\n\n def read_meta(self):\n return read_meta(\n self.params[\"deployment_name\"],\n self.params[\"debug\"],\n )\n\n def recreate_command_line(self, separator=\" \\\\\\n\"):\n \"\"\"\n Recreate command line\n \"\"\"\n\n command_line = sys.argv[0]\n\n for k, v in self.input_params.items():\n k = k.replace(\"_\", \"-\")\n\n if isinstance(v, bool):\n if v:\n command_line += separator + \"--\" + k\n else:\n not_prefix = \"--no-\"\n\n if k in [\"from-image\"]:\n not_prefix = \"--not-\"\n\n command_line += separator + not_prefix + k\n else:\n command_line += separator + \"--\" + k + \" \"\n\n if isinstance(v, str):\n command_line += \"'\" + shlex.quote(v) + \"'\"\n else:\n command_line += str(v)\n\n return command_line\n\n def ask_existing_behavior(self):\n \"\"\"\n Ask what to do if deployment already exists\n \"\"\"\n\n deployment_name = self.params[\"deployment_name\"]\n existing = self.params[\"existing\"]\n\n self.existing_behavior = existing\n\n if existing == \"ask\" and os.path.isfile(\n f\"{self.config['state_dir']}/{deployment_name}/.tfvars\"\n ):\n self.existing_behavior = click.prompt(\n text=colorize_prompt(\n \"* Deploymemnt exists, what would you like to do? See --help for details.\"\n ),\n type=click.Choice([\"repair\", \"modify\", \"replace\", \"run_ansible\"]),\n default=\"replace\",\n )\n\n if (\n self.existing_behavior == \"repair\"\n or self.existing_behavior == \"run_ansible\"\n ):\n # restore params from meta file\n r = self.read_meta()\n self.params = r[\"params\"]\n\n click.echo(\n colorize_info(\n f\"* Repairing existing deployment \\\"{self.params['deployment_name']}\\\"...\"\n )\n )\n\n # update meta info (with new value for existing_behavior)\n self.save_meta()\n\n # destroy existing deployment``\n if self.existing_behavior == \"replace\":\n debug = self.params[\"debug\"]\n click.echo(colorize_info(\"* Deleting existing deployment...\"))\n\n shell_command(\n command=f'{self.config[\"app_dir\"]}/destroy \"{deployment_name}\" --yes'\n + f' {\"--debug\" if debug else \"\"}',\n verbose=debug,\n )\n\n # update meta info if deployment was destroyed\n self.save_meta()\n\n def validate_ngc_api_key(self, image, restricted_image=False):\n \"\"\"\n Check if NGC API key allows to log in and has access to appropriate NGC image\n @param image: NGC image to check access to\n @param restricted_image: If image is restricted to specific org/team?\n \"\"\"\n\n debug = self.params[\"debug\"]\n ngc_api_key = self.params[\"ngc_api_key\"]\n ngc_api_key_check = self.params[\"ngc_api_key_check\"]\n\n # extract org and team from the image path\n\n r = re.findall(\n \"^nvcr\\\\.io/([a-z0-9\\\\-_]+)/([a-z0-9\\\\-_]+/)?[a-z0-9\\\\-_]+:[a-z0-9\\\\-_.]+$\",\n image,\n )\n\n ngc_org, ngc_team = r[0]\n ngc_team = ngc_team.rstrip(\"/\")\n\n if ngc_org == \"nvidia\":\n click.echo(\n colorize_info(\n \"* Access to docker image can't be checked for NVIDIA org. But you'll be fine. Fingers crossed.\"\n )\n )\n return\n\n if debug:\n click.echo(colorize_info(f'* Will check access to NGC Org: \"{ngc_org}\"'))\n click.echo(colorize_info(f'* Will check access to NGC Team: \"{ngc_team}\"'))\n\n if ngc_api_key_check and ngc_api_key != \"none\":\n click.echo(colorize_info(\"* Validating NGC API key... \"))\n r = check_ngc_access(\n ngc_api_key=ngc_api_key, org=ngc_org, team=ngc_team, verbose=debug\n )\n if r == 100:\n raise Exception(colorize_error(\"NGC API key is invalid.\"))\n # only check access to org/team if restricted image is deployed\n elif restricted_image and (r == 101 or r == 102):\n raise Exception(\n colorize_error(\n f'NGC API key is valid but you don\\'t have access to image \"{image}\".'\n )\n )\n click.echo(colorize_info((\"* NGC API Key is valid!\")))\n\n def create_tfvars(self, tfvars: dict = {}):\n \"\"\"\n - Check if deployment with this deployment_name exists and deal with it\n - Create/update tfvars file\n\n Expected values for \"existing_behavior\" arg:\n - repair: keep tfvars/tfstate, don't ask for user input\n - modify: keep tfstate file, update tfvars file with user input\n - replace: delete tfvars/tfstate files\n - run_ansible: keep tfvars/tfstate, don't ask for user input, skip terraform steps\n \"\"\"\n\n # default values common for all clouds\n tfvars.update(\n {\n \"isaac_enabled\": self.params[\"isaac\"]\n if \"isaac\" in self.params\n else False,\n #\n \"isaac_instance_type\": self.params[\"isaac_instance_type\"]\n if \"isaac_instance_type\" in self.params\n else \"none\",\n #\n \"prefix\": self.params[\"prefix\"],\n \"ssh_port\": self.params[\"ssh_port\"],\n #\n \"from_image\": self.params[\"from_image\"]\n if \"from_image\" in self.params\n else False,\n #\n \"deployment_name\": self.params[\"deployment_name\"],\n }\n )\n\n debug = self.params[\"debug\"]\n deployment_name = self.params[\"deployment_name\"]\n\n # deal with existing deployment:\n\n tfvars_file = f\"{self.config['state_dir']}/{deployment_name}/.tfvars\"\n tfstate_file = f\"{self.config['state_dir']}/{deployment_name}/.tfstate\"\n\n # tfvars\n if os.path.exists(tfvars_file):\n if (\n self.existing_behavior == \"modify\"\n or self.existing_behavior == \"overwrite\"\n ):\n os.remove(tfvars_file)\n if debug:\n click.echo(colorize_info(f'* Deleted \"{tfvars_file}\"...'))\n\n # tfstate\n if os.path.exists(tfstate_file):\n if self.existing_behavior == \"overwrite\":\n os.remove(tfstate_file)\n if debug:\n click.echo(colorize_info(f'* Deleted \"{tfstate_file}\"...'))\n\n # create tfvars file\n if (\n self.existing_behavior == \"modify\"\n or self.existing_behavior == \"overwrite\"\n or not os.path.exists(tfvars_file)\n ):\n self._write_tfvars_file(path=tfvars_file, tfvars=tfvars)\n\n def _write_tfvars_file(self, path: str, tfvars: dict):\n \"\"\"\n Write tfvars file\n \"\"\"\n\n debug = self.params[\"debug\"]\n\n if debug:\n click.echo(colorize_info(f'* Created tfvars file \"{path}\"'))\n\n # create <dn>/ directory if it doesn't exist\n Path(path).parent.mkdir(parents=True, exist_ok=True)\n\n with open(path, \"w\") as f:\n for key, value in tfvars.items():\n # convert booleans to strings\n if isinstance(value, bool):\n value = {\n True: \"true\",\n False: \"false\",\n }[value]\n\n # format key names\n key = key.replace(\"-\", \"_\")\n\n # write values\n if isinstance(value, str):\n value = value.replace('\"', '\\\\\"')\n f.write(f'{key} = \"{value}\"\\n')\n elif isinstance(value, list):\n f.write(f\"{key} = \" + str(value).replace(\"'\", '\"') + \"\\n\")\n else:\n f.write(f\"{key} = {value}\\n\")\n\n def create_ansible_inventory(self, write: bool = True):\n \"\"\"\n Create Ansible inventory, return it as text\n Write to file if write=True\n \"\"\"\n\n debug = self.params[\"debug\"]\n deployment_name = self.params[\"deployment_name\"]\n\n ansible_vars = self.params.copy()\n\n # add config\n ansible_vars[\"config\"] = self.config\n\n # get missing values from terraform\n for k in [\n \"isaac_ip\",\n \"ovami_ip\",\n \"cloud\",\n ]:\n if k not in self.params or ansible_vars[k] is None:\n ansible_vars[k] = self.tf_output(k)\n\n # convert booleans to ansible format\n ansible_booleans = {True: \"true\", False: \"false\"}\n for k, v in ansible_vars.items():\n if isinstance(v, bool):\n ansible_vars[k] = ansible_booleans[v]\n\n template = Path(f\"{self.config['ansible_dir']}/inventory.template\").read_text()\n res = template.format(**ansible_vars)\n\n # write to file\n if write:\n inventory_file = f\"{self.config['state_dir']}/{deployment_name}/.inventory\"\n Path(inventory_file).parent.mkdir(parents=True, exist_ok=True) # create dir\n Path(inventory_file).write_text(res) # write file\n if debug:\n click.echo(\n colorize_info(\n f'* Created Ansible inventory file \"{inventory_file}\"'\n )\n )\n\n return res\n\n def initialize_terraform(self, cwd: str):\n \"\"\"\n Initialize Terraform via shell command\n cwd: directory where terraform scripts are located\n \"\"\"\n debug = self.params[\"debug\"]\n\n shell_command(\n f\"terraform init -upgrade -no-color -input=false {' > /dev/null' if not debug else ''}\",\n verbose=debug,\n cwd=cwd,\n )\n\n def run_terraform(self, cwd: str):\n \"\"\"\n Apply Terraform via shell command\n cwd: directory where terraform scripts are located\n \"\"\"\n\n debug = self.params[\"debug\"]\n deployment_name = self.params[\"deployment_name\"]\n\n shell_command(\n \"terraform apply -auto-approve \"\n + f\"-state={self.config['state_dir']}/{deployment_name}/.tfstate \"\n + f\"-var-file={self.config['state_dir']}/{deployment_name}/.tfvars\",\n cwd=cwd,\n verbose=debug,\n )\n\n def export_ssh_key(self):\n \"\"\"\n Export SSH key from Terraform state\n \"\"\"\n\n debug = self.params[\"debug\"]\n deployment_name = self.params[\"deployment_name\"]\n\n shell_command(\n f\"terraform output -state={self.config['state_dir']}/{deployment_name}/.tfstate -raw ssh_key\"\n + f\" > {self.config['state_dir']}/{deployment_name}/key.pem && \"\n + f\"chmod 0600 {self.config['state_dir']}/{deployment_name}/key.pem\",\n verbose=debug,\n )\n\n def run_ansible(self, playbook_name: str, cwd: str):\n \"\"\"\n Run Ansible playbook via shell command\n \"\"\"\n\n debug = self.params[\"debug\"]\n deployment_name = self.params[\"deployment_name\"]\n\n shell_command(\n f\"ansible-playbook -i {self.config['state_dir']}/{deployment_name}/.inventory \"\n + f\"{playbook_name}.yml {'-vv' if self.params['debug'] else ''}\",\n cwd=cwd,\n verbose=debug,\n )\n\n def run_all_ansible(self):\n # run ansible for isaac\n if \"isaac\" in self.params and self.params[\"isaac\"]:\n click.echo(colorize_info(\"* Running Ansible for Isaac Sim...\"))\n self.run_ansible(playbook_name=\"isaac\", cwd=f\"{self.config['ansible_dir']}\")\n\n # run ansible for ovami\n # todo: move to ./deploy-aws\n if \"ovami\" in self.params and self.params[\"ovami\"]:\n click.echo(colorize_info(\"* Running Ansible for OV AMI...\"))\n self.run_ansible(playbook_name=\"ovami\", cwd=f\"{self.config['ansible_dir']}\")\n\n def tf_output(self, key: str, default: str = \"\"):\n \"\"\"\n Read Terraform output.\n Cache read values in self._tf_outputs.\n \"\"\"\n\n if key not in self.tf_outputs:\n debug = self.params[\"debug\"]\n deployment_name = self.params[\"deployment_name\"]\n\n r = shell_command(\n f\"terraform output -state='{self.config['state_dir']}/{deployment_name}/.tfstate' -raw '{key}'\",\n capture_output=True,\n exit_on_error=False,\n verbose=debug,\n )\n\n if r.returncode == 0:\n self.tf_outputs[key] = r.stdout.decode()\n else:\n if self.params[\"debug\"]:\n click.echo(\n colorize_error(\n f\"* Warning: Terraform output '{key}' cannot be read.\"\n ),\n err=True,\n )\n self.tf_outputs[key] = default\n\n # update meta file to reflect tf outputs\n self.save_meta()\n\n return self.tf_outputs[key]\n\n def upload_user_data(self):\n shell_command(\n f'./upload \"{self.params[\"deployment_name\"]}\" '\n + f'{\"--debug\" if self.params[\"debug\"] else \"\"}',\n cwd=self.config[\"app_dir\"],\n verbose=self.params[\"debug\"],\n exit_on_error=True,\n capture_output=False,\n )\n\n # generate ssh connection command for the user\n def ssh_connection_command(self, ip: str):\n r = f\"ssh -i state/{self.params['deployment_name']}/key.pem \"\n r += f\"-o StrictHostKeyChecking=no ubuntu@{ip}\"\n if self.params[\"ssh_port\"] != 22:\n r += f\" -p {self.params['ssh_port']}\"\n return r\n\n def output_deployment_info(self, extra_text: str = \"\", print_text=True):\n \"\"\"\n Print connection info for the user\n Save info to file (_state_dir_/_deployment_name_/info.txt)\n \"\"\"\n\n isaac = \"isaac\" in self.params and self.params[\"isaac\"]\n ovami = \"ovami\" in self.params and self.params[\"ovami\"]\n\n vnc_password = self.params[\"vnc_password\"]\n deployment_name = self.params[\"deployment_name\"]\n\n # templates\n nomachine_instruction = f\"\"\"* To connect to __app__ via NoMachine:\n\n0. Download NoMachine client at https://downloads.nomachine.com/, install and launch it.\n1. Click \"Add\" button.\n2. Enter Host: \"__ip__\".\n3. In \"Configuration\" > \"Use key-based authentication with a key you provide\",\n select file \"state/{deployment_name}/key.pem\".\n4. Click \"Connect\" button.\n5. Enter \"ubuntu\" as a username when prompted.\n\"\"\"\n\n vnc_instruction = f\"\"\"* To connect to __app__ via VNC:\n\n- IP: __ip__\n- Port: 5900\n- Password: {vnc_password}\"\"\"\n\n nonvc_instruction = f\"\"\"* To connect to __app__ via noVNC:\n\n1. Open http://__ip__:6080/vnc.html?host=__ip__&port=6080 in your browser.\n2. Click \"Connect\" and use password \\\"{vnc_password}\\\"\"\"\"\n\n # print connection info\n\n instructions_file = f\"{self.config['state_dir']}/{deployment_name}/info.txt\"\n instructions = \"\"\n\n if isaac:\n instructions += f\"\"\"{'*' * (29+len(self.tf_output('isaac_ip')))}\n* Isaac Sim is deployed at {self.tf_output('isaac_ip')} *\n{'*' * (29+len(self.tf_output('isaac_ip')))}\n\n* To connect to Isaac Sim via SSH:\n\n{self.ssh_connection_command(self.tf_output('isaac_ip'))}\n\n{nonvc_instruction}\n\n{nomachine_instruction}\"\"\".replace(\n \"__app__\", \"Isaac Sim\"\n ).replace(\n \"__ip__\", self.tf_output(\"isaac_ip\")\n )\n\n # todo: move to ./deploy-aws\n if ovami:\n instructions += f\"\"\"\\n\n* OV AMI is deployed at {self.tf_output('ovami_ip')}\n\n* To connect to OV AMI via SSH:\n\n{self.ssh_connection_command(self.tf_output('ovami_ip'))}\n\n* To connect to OV AMI via NICE DCV:\n\n- IP: __ip__\n\n{vnc_instruction}\n\n{nomachine_instruction}\n\n\"\"\".replace(\n \"__app__\", \"OV AMI\"\n ).replace(\n \"__ip__\", self.tf_output(\"ovami_ip\")\n )\n\n # extra text\n if len(extra_text) > 0:\n instructions += extra_text + \"\\n\"\n\n # print instructions for the user\n if print_text:\n click.echo(colorize_result(\"\\n\" + instructions))\n\n # create <dn>/ directory if it doesn't exist\n Path(instructions_file).parent.mkdir(parents=True, exist_ok=True)\n # write file\n Path(instructions_file).write_text(instructions)\n\n return instructions"
}
] | import unittest
from src.python.config import c
from src.python.deployer import Deployer
from pathlib import Path | 4,761 | #!/usr/bin/env python3
class Test_Deployer(unittest.TestCase):
def setUp(self):
self.config = c
self.config["state_dir"] = f"{c['tests_dir']}/res/state"
| #!/usr/bin/env python3
class Test_Deployer(unittest.TestCase):
def setUp(self):
self.config = c
self.config["state_dir"] = f"{c['tests_dir']}/res/state"
| self.deployer = Deployer( | 1 | 2023-10-18 17:25:44+00:00 | 8k |
blackgold3/SemanticBoost | SMPLX/transfer_smpls.py | [
{
"identifier": "write_obj",
"path": "SMPLX/transfer_model/write_obj.py",
"snippet": "def write_obj(\n model_folder,\n motion_file,\n output_folder,\n model_type=\"smplh\",\n gender=\"neutral\",\n num_betas=10,\n num_expression_coeffs=10,\n use_face_contour=False,\n device=\"cpu\"\n):\n output_folder = Path(output_folder)\n assert output_folder.exists()\n\n # open motion file\n motion = np.load(motion_file, allow_pickle=True)\n try:\n poses = motion[\"poses\"]\n gender = str(motion.get(\"gender\", \"neutral\"))\n trans = motion.get(\"trans\", None)\n betas = motion.get(\"betas\", np.zeros([poses.shape[0], 10]))\n except:\n poses, trans, gender, betas = npy2info(motion, 10)\n\n # don't know where this is documented but it's from this part of amass\n # https://github.com/nghorbani/amass/blob/master/src/amass/data/prepare_data.py#L39-L40\n # gdr2num = {'male':-1, 'neutral':0, 'female':1}\n # gdr2num_rev = {v:k for k,v in gdr2num.items()}\n\n model = smplx.create(model_folder, model_type=model_type,\n gender=gender, use_face_contour=use_face_contour,\n num_betas=num_betas,\n num_expression_coeffs=num_expression_coeffs,\n ext=\"npz\", use_pca=False, batch_size=poses.shape[0])\n\n model = model.eval().to(device)\n inputs = info2dict(poses, trans, betas, model_type, device=device)\n output = model(**inputs)\n vertices = output.vertices.detach().cpu().numpy()\n\n for pose_idx in range(vertices.shape[0]):\n curr_vert = vertices[pose_idx]\n\n vertex_colors = np.ones([curr_vert.shape[0], 4]) * [0.3, 0.3, 0.3, 0.8]\n # process=False to avoid creating a new mesh\n tri_mesh = trimesh.Trimesh(\n curr_vert, model.faces, vertex_colors=vertex_colors, process=False\n )\n\n '''\n humanact12 smpl 转 smplx\n 仅和 amass 格式对齐时使用\n '''\n if \"humanact\" in motion_file:\n transf = trimesh.transformations.rotation_matrix(np.radians(90), (1, 0, 0))\n tri_mesh.apply_transform(transf)\n ###################\n\n output_path = output_folder / \"{0:04d}.obj\".format(pose_idx)\n tri_mesh.export(str(output_path))\n\n del model \n del motion"
},
{
"identifier": "read_deformation_transfer",
"path": "SMPLX/transfer_model/utils/def_transfer.py",
"snippet": "def read_deformation_transfer(\n deformation_transfer_path: str,\n device=None,\n use_normal: bool = False,\n) -> Tensor:\n ''' Reads a deformation transfer\n '''\n if device is None:\n device = torch.device('cpu')\n assert osp.exists(deformation_transfer_path), (\n 'Deformation transfer path does not exist:'\n f' {deformation_transfer_path}')\n logger.info(\n f'Loading deformation transfer from: {deformation_transfer_path}')\n # Read the deformation transfer matrix\n with open(deformation_transfer_path, 'rb') as f:\n def_transfer_setup = pickle.load(f, encoding='latin1')\n if 'mtx' in def_transfer_setup:\n def_matrix = def_transfer_setup['mtx']\n if hasattr(def_matrix, 'todense'):\n def_matrix = def_matrix.todense()\n def_matrix = np.array(def_matrix, dtype=np.float32)\n if not use_normal:\n num_verts = def_matrix.shape[1] // 2\n def_matrix = def_matrix[:, :num_verts]\n elif 'matrix' in def_transfer_setup:\n def_matrix = def_transfer_setup['matrix']\n else:\n valid_keys = ['mtx', 'matrix']\n raise KeyError(f'Deformation transfer setup must contain {valid_keys}')\n\n def_matrix = torch.tensor(def_matrix, device=device, dtype=torch.float32)\n return def_matrix"
},
{
"identifier": "build_dataloader",
"path": "SMPLX/transfer_model/data/build.py",
"snippet": "def build_dataloader(datasets):\n mesh_folder_cfg = datasets[\"mesh_folder\"]\n key, *_ = mesh_folder_cfg.keys()\n value = mesh_folder_cfg[key]\n logger.info(f'{key}: {value}\\n')\n dataset = MeshFolder(**mesh_folder_cfg)\n\n batch_size = datasets[\"batch_size\"]\n num_workers = 1\n\n logger.info(\n f'Creating dataloader with B={batch_size}, workers={num_workers}')\n dataloader = dutils.DataLoader(dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n shuffle=False)\n\n return {'dataloader': dataloader, 'dataset': dataset}"
},
{
"identifier": "run_fitting",
"path": "SMPLX/transfer_model/transfer_model.py",
"snippet": "def run_fitting(\n # exp_cfg,\n batch: Dict[str, Tensor],\n body_model: nn.Module,\n def_matrix: Tensor,\n mask_ids\n) -> Dict[str, Tensor]:\n ''' Runs fitting\n '''\n vertices = batch['vertices']\n faces = batch['faces']\n\n batch_size = len(vertices)\n dtype, device = vertices.dtype, vertices.device\n # summary_steps = exp_cfg.get('summary_steps')\n # interactive = exp_cfg.get('interactive')\n\n summary_steps = 100\n interactive = True\n\n # Get the parameters from the model\n var_dict = get_variables(batch_size, body_model)\n\n # Build the optimizer object for the current batch\n # optim_cfg = exp_cfg.get('optim', {})\n\n optim_cfg = {'type': 'trust-ncg', 'lr': 1.0, 'gtol': 1e-06, 'ftol': -1.0, 'maxiters': 100, 'lbfgs': {'line_search_fn': 'strong_wolfe', 'max_iter': 50}, 'sgd': {'momentum': 0.9, 'nesterov': True}, 'adam': {'betas': [0.9, 0.999], 'eps': 1e-08, 'amsgrad': False}, 'trust_ncg': {'max_trust_radius': 1000.0, 'initial_trust_radius': 0.05, 'eta': 0.15, 'gtol': 1e-05}}\n\n def_vertices = apply_deformation_transfer(def_matrix, vertices, faces)\n\n if mask_ids is None:\n f_sel = np.ones_like(body_model.faces[:, 0], dtype=np.bool_)\n else:\n f_per_v = [[] for _ in range(body_model.get_num_verts())]\n [f_per_v[vv].append(iff) for iff, ff in enumerate(body_model.faces)\n for vv in ff]\n f_sel = list(set(tuple(sum([f_per_v[vv] for vv in mask_ids], []))))\n vpe = get_vertices_per_edge(\n body_model.v_template.detach().cpu().numpy(), body_model.faces[f_sel])\n\n def log_closure():\n return summary_closure(def_vertices, var_dict, body_model,\n mask_ids=mask_ids)\n\n # edge_fitting_cfg = exp_cfg.get('edge_fitting', {})\n edge_fitting_cfg = {'per_part': False, 'reduction': 'mean'}\n\n edge_loss = build_loss(type='vertex-edge', gt_edges=vpe, est_edges=vpe,\n **edge_fitting_cfg)\n edge_loss = edge_loss.to(device=device)\n\n # vertex_fitting_cfg = exp_cfg.get('vertex_fitting', {})\n vertex_fitting_cfg = {}\n\n vertex_loss = build_loss(**vertex_fitting_cfg)\n vertex_loss = vertex_loss.to(device=device)\n\n per_part = edge_fitting_cfg.get('per_part', True)\n logger.info(f'Per-part: {per_part}')\n # Optimize edge-based loss to initialize pose\n if per_part:\n for key, var in tqdm(var_dict.items(), desc='Parts'):\n if 'pose' not in key:\n continue\n\n for jidx in tqdm(range(var.shape[1]), desc='Joints'):\n part = torch.zeros(\n [batch_size, 3], dtype=dtype, device=device,\n requires_grad=True)\n # Build the optimizer for the current part\n optimizer_dict = build_optimizer([part], optim_cfg)\n closure = build_edge_closure(\n body_model, var_dict, edge_loss, optimizer_dict,\n def_vertices, per_part=per_part, part_key=key, jidx=jidx,\n part=part)\n\n minimize(optimizer_dict['optimizer'], closure,\n params=[part],\n summary_closure=log_closure,\n summary_steps=summary_steps,\n interactive=interactive,\n **optim_cfg)\n with torch.no_grad():\n var[:, jidx] = part\n else:\n optimizer_dict = build_optimizer(list(var_dict.values()), optim_cfg)\n closure = build_edge_closure(\n body_model, var_dict, edge_loss, optimizer_dict,\n def_vertices, per_part=per_part)\n\n minimize(optimizer_dict['optimizer'], closure,\n params=var_dict.values(),\n summary_closure=log_closure,\n summary_steps=summary_steps,\n interactive=interactive,\n **optim_cfg)\n\n if 'translation' in var_dict:\n optimizer_dict = build_optimizer([var_dict['translation']], optim_cfg)\n closure = build_vertex_closure(\n body_model, var_dict,\n optimizer_dict,\n def_vertices,\n vertex_loss=vertex_loss,\n mask_ids=mask_ids,\n per_part=False,\n params_to_opt=[var_dict['translation']],\n )\n # Optimize translation\n minimize(optimizer_dict['optimizer'],\n closure,\n params=[var_dict['translation']],\n summary_closure=log_closure,\n summary_steps=summary_steps,\n interactive=interactive,\n **optim_cfg)\n\n # Optimize all model parameters with vertex-based loss\n optimizer_dict = build_optimizer(list(var_dict.values()), optim_cfg)\n closure = build_vertex_closure(\n body_model, var_dict,\n optimizer_dict,\n def_vertices,\n vertex_loss=vertex_loss,\n per_part=False,\n mask_ids=mask_ids)\n minimize(optimizer_dict['optimizer'], closure,\n params=list(var_dict.values()),\n summary_closure=log_closure,\n summary_steps=summary_steps,\n interactive=interactive,\n **optim_cfg)\n\n param_dict = {}\n for key, var in var_dict.items():\n # Decode the axis-angles\n if 'pose' in key or 'orient' in key:\n param_dict[key] = batch_rodrigues(\n var.reshape(-1, 3)).reshape(len(var), -1, 3, 3)\n else:\n # Simply pass the variable\n param_dict[key] = var\n\n body_model_output = body_model(\n return_full_pose=True, get_skin=True, **param_dict)\n\n keys = [\"vertices\", \"joints\", \"betas\", \"global_orient\", \"body_pose\", \"left_hand_pose\", \"right_hand_pose\", \"full_pose\"]\n for key in keys:\n var_dict[key] = getattr(body_model_output, key)\n\n var_dict['faces'] = body_model.faces\n\n for key in var_dict.keys():\n try:\n var_dict[key] = var_dict[key].detach().cpu().numpy()\n except:\n pass\n\n return var_dict"
},
{
"identifier": "merge",
"path": "SMPLX/transfer_model/merge_output.py",
"snippet": "def merge(output_dir, gender):\n output_dir = Path(output_dir)\n assert output_dir.exists()\n assert output_dir.is_dir()\n\n # get list of all pkl files in output_dir with fixed length numeral names\n pkl_files = [f for f in output_dir.glob(\"*.pkl\") if f.stem != \"merged\"]\n pkl_files = [f for f in sorted(pkl_files, key=lambda x: int(x.stem))]\n assert \"merged.pkl\" not in [f.name for f in pkl_files]\n\n merged = {}\n # iterate over keys and put all values in lists\n keys = set(KEYS) \n for k in keys:\n merged[k] = []\n for pkl_file in pkl_files:\n with open(pkl_file, \"rb\") as f:\n data = pickle.load(f)\n for k in keys:\n if k in data:\n merged[k].append(data[k])\n b = np.concatenate(merged[\"betas\"], axis=0)\n print(\"betas:\")\n for mu, sigma in zip(b.mean(0), b.std(0)):\n print(\" {:.3f} +/- {:.3f}\".format(mu, sigma))\n\n # aggregate all values\n for k in keys:\n merged[k] = aggregate_function[k](merged[k])\n\n # add gender\n\n poses = merged[\"full_pose\"]\n trans = merged[\"transl\"]\n if gender == \"female\":\n gender = np.zeros([poses.shape[0], 1])\n elif gender == \"male\":\n gender = np.ones([poses.shape[0], 1])\n else:\n gender = np.ones([poses.shape[0], 1]) * 2\n \n merged = np.concatenate([poses, trans, gender], axis=1)\n \n return merged"
},
{
"identifier": "build_layer",
"path": "SMPLX/smplx/body_models.py",
"snippet": "def build_layer(\n model_path: str,\n model_type: str = 'smpl',\n **kwargs\n) -> Union[SMPLLayer, SMPLHLayer, SMPLXLayer, MANOLayer, FLAMELayer]:\n ''' Method for creating a model from a path and a model type\n\n Parameters\n ----------\n model_path: str\n Either the path to the model you wish to load or a folder,\n where each subfolder contains the differents types, i.e.:\n model_path:\n |\n |-- smpl\n |-- SMPL_FEMALE\n |-- SMPL_NEUTRAL\n |-- SMPL_MALE\n |-- smplh\n |-- SMPLH_FEMALE\n |-- SMPLH_MALE\n |-- smplx\n |-- SMPLX_FEMALE\n |-- SMPLX_NEUTRAL\n |-- SMPLX_MALE\n |-- mano\n |-- MANO RIGHT\n |-- MANO LEFT\n |-- flame\n |-- FLAME_FEMALE\n |-- FLAME_MALE\n |-- FLAME_NEUTRAL\n\n model_type: str, optional\n When model_path is a folder, then this parameter specifies the\n type of model to be loaded\n **kwargs: dict\n Keyword arguments\n\n Returns\n -------\n body_model: nn.Module\n The PyTorch module that implements the corresponding body model\n Raises\n ------\n ValueError: In case the model type is not one of SMPL, SMPLH,\n SMPLX, MANO or FLAME\n '''\n\n if osp.isdir(model_path):\n model_path = os.path.join(model_path, model_type)\n else:\n model_type = osp.basename(model_path).split('_')[0].lower()\n\n if model_type.lower() == 'smpl':\n return SMPLLayer(model_path, **kwargs)\n elif model_type.lower() == 'smplh':\n return SMPLHLayer(model_path, **kwargs)\n elif model_type.lower() == 'smplx':\n return SMPLXLayer(model_path, **kwargs)\n elif 'mano' in model_type.lower():\n return MANOLayer(model_path, **kwargs)\n elif 'flame' in model_type.lower():\n return FLAMELayer(model_path, **kwargs)\n else:\n raise ValueError(f'Unknown model type {model_type}, exiting!')"
}
] | import argparse
import numpy as np
import pickle
import os
import torch
import subprocess
import platform
import time
from SMPLX.transfer_model.write_obj import write_obj
from SMPLX.transfer_model.utils import read_deformation_transfer
from SMPLX.transfer_model.data import build_dataloader
from SMPLX.transfer_model.transfer_model import run_fitting
from SMPLX.transfer_model.merge_output import merge
from SMPLX.smplx import build_layer
from tqdm import tqdm | 4,752 |
def load_npz(path):
return np.load(path)
def load_pickle(path):
with open(path, "rb") as f:
res = pickle.load(f, encoding="latin1")
return res
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='transfer between smpls')
parser.add_argument('--source', default="smpl")
parser.add_argument("--target", default="smplh")
parser.add_argument("--model_path", default="/data/TTA/data/body_models")
parser.add_argument("--extra_dir", default="/data/TTA/data/extra_dir", help="https://smpl-x.is.tue.mpg.de/download.php")
parser.add_argument("--source_path", default="/data/TTA/data/humanact_smpl")
parser.add_argument("--target_path", default="/data/TTA/data/humanact_smplh")
parser.add_argument("--batch_size", default=500, type=int)
args = parser.parse_args()
device = "cuda"
if args.target == "smplx" or args.source == "smplx":
deformation_transfer_path = os.path.join(args.extra_dir, "{}2{}_deftrafo_setup.pkl".format(args.source, args.target))
else:
deformation_transfer_path = os.path.join(args.extra_dir, "{}2{}_def_transfer.pkl".format(args.source, args.target))
if args.target == "smplx":
model_params = {"betas":{"num":10}, "expression":{"num": 10}}
mask_ids_fname = os.path.join(args.extra_dir, "smplx_mask_ids.npy")
if os.path.exists(mask_ids_fname):
mask_ids = np.load(mask_ids_fname)
mask_ids = torch.from_numpy(mask_ids).to(device=device)
else:
print(f'Mask ids fname not found: {mask_ids_fname}')
elif args.target == "smplh" or args.target == "smpl":
model_params = {"betas":{"num":10}}
mask_ids_fname = ""
mask_ids = None
body_model_conf = {
"ext":"npz",
"model_type": args.target,
"folder": args.model_path,
"use_compressed": False,
args.target:model_params
}
if args.target == "smplx" or args.target == "smpl":
body_model_conf["use_face_contour"] = True
for root, dirs, files in os.walk(args.source_path):
for name in files:
curr_file = os.path.join(root, name)
new_root = os.path.join(args.target_path , "/".join(root.split("/")[:-2:-1]))
os.makedirs(new_root, exist_ok=True)
curr_target = os.path.join(new_root, name.replace(".npz", ".npy"))
if os.path.exists(curr_target):
print("%s has been competed"%(curr_target))
continue
if name.split(".")[-1] == "npz":
curr = load_npz(curr_file)
body_pose = None
elif name.split(".")[-1] == "pkl":
curr = load_pickle(curr_file)
body_pose = None
elif name.split(".")[-1] == "npy":
curr = np.load(curr_file)
body_pose = curr
else:
continue
if body_pose is None:
try:
body_pose = curr["poses"]
except:
print("Not Pose Data")
continue
gender = str(curr["gender"])
body_model_conf["gender"] = gender
else:
gender = "neutral"
body_model_conf["gender"] = gender
cid = name.split(".")[0]
save_folder1 = os.path.join("temp", "objs")
save_folder2 = os.path.join(new_root, str(time.time()))
os.makedirs(save_folder1, exist_ok=True)
os.makedirs(save_folder2, exist_ok=True)
write_obj(args.model_path, curr_file, save_folder1, args.source, gender, 10, 10, True, device)
|
def load_npz(path):
return np.load(path)
def load_pickle(path):
with open(path, "rb") as f:
res = pickle.load(f, encoding="latin1")
return res
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='transfer between smpls')
parser.add_argument('--source', default="smpl")
parser.add_argument("--target", default="smplh")
parser.add_argument("--model_path", default="/data/TTA/data/body_models")
parser.add_argument("--extra_dir", default="/data/TTA/data/extra_dir", help="https://smpl-x.is.tue.mpg.de/download.php")
parser.add_argument("--source_path", default="/data/TTA/data/humanact_smpl")
parser.add_argument("--target_path", default="/data/TTA/data/humanact_smplh")
parser.add_argument("--batch_size", default=500, type=int)
args = parser.parse_args()
device = "cuda"
if args.target == "smplx" or args.source == "smplx":
deformation_transfer_path = os.path.join(args.extra_dir, "{}2{}_deftrafo_setup.pkl".format(args.source, args.target))
else:
deformation_transfer_path = os.path.join(args.extra_dir, "{}2{}_def_transfer.pkl".format(args.source, args.target))
if args.target == "smplx":
model_params = {"betas":{"num":10}, "expression":{"num": 10}}
mask_ids_fname = os.path.join(args.extra_dir, "smplx_mask_ids.npy")
if os.path.exists(mask_ids_fname):
mask_ids = np.load(mask_ids_fname)
mask_ids = torch.from_numpy(mask_ids).to(device=device)
else:
print(f'Mask ids fname not found: {mask_ids_fname}')
elif args.target == "smplh" or args.target == "smpl":
model_params = {"betas":{"num":10}}
mask_ids_fname = ""
mask_ids = None
body_model_conf = {
"ext":"npz",
"model_type": args.target,
"folder": args.model_path,
"use_compressed": False,
args.target:model_params
}
if args.target == "smplx" or args.target == "smpl":
body_model_conf["use_face_contour"] = True
for root, dirs, files in os.walk(args.source_path):
for name in files:
curr_file = os.path.join(root, name)
new_root = os.path.join(args.target_path , "/".join(root.split("/")[:-2:-1]))
os.makedirs(new_root, exist_ok=True)
curr_target = os.path.join(new_root, name.replace(".npz", ".npy"))
if os.path.exists(curr_target):
print("%s has been competed"%(curr_target))
continue
if name.split(".")[-1] == "npz":
curr = load_npz(curr_file)
body_pose = None
elif name.split(".")[-1] == "pkl":
curr = load_pickle(curr_file)
body_pose = None
elif name.split(".")[-1] == "npy":
curr = np.load(curr_file)
body_pose = curr
else:
continue
if body_pose is None:
try:
body_pose = curr["poses"]
except:
print("Not Pose Data")
continue
gender = str(curr["gender"])
body_model_conf["gender"] = gender
else:
gender = "neutral"
body_model_conf["gender"] = gender
cid = name.split(".")[0]
save_folder1 = os.path.join("temp", "objs")
save_folder2 = os.path.join(new_root, str(time.time()))
os.makedirs(save_folder1, exist_ok=True)
os.makedirs(save_folder2, exist_ok=True)
write_obj(args.model_path, curr_file, save_folder1, args.source, gender, 10, 10, True, device)
| body_model = build_layer(args.model_path, **body_model_conf) | 5 | 2023-10-20 14:53:26+00:00 | 8k |
justchenhao/SILI_CD | compared_models/changeformer.py | [
{
"identifier": "UpsampleConvLayer",
"path": "compared_models/changeformerbase.py",
"snippet": "class UpsampleConvLayer(torch.nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride):\n super(UpsampleConvLayer, self).__init__()\n self.conv2d = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride=stride, padding=1)\n\n def forward(self, x):\n out = self.conv2d(x)\n return out"
},
{
"identifier": "ResidualBlock",
"path": "compared_models/changeformerbase.py",
"snippet": "class ResidualBlock(torch.nn.Module):\n def __init__(self, channels):\n super(ResidualBlock, self).__init__()\n self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1, padding=1)\n self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1, padding=1)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n residual = x\n out = self.relu(self.conv1(x))\n out = self.conv2(out) * 0.1\n out = torch.add(out, residual)\n return out"
},
{
"identifier": "ConvLayer",
"path": "compared_models/changeformerbase.py",
"snippet": "class ConvLayer(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride, padding):\n super(ConvLayer, self).__init__()\n # reflection_padding = kernel_size // 2\n # self.reflection_pad = nn.ReflectionPad2d(reflection_padding)\n self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)\n\n def forward(self, x):\n # out = self.reflection_pad(x)\n out = self.conv2d(x)\n return out"
}
] | import torch
import torch.nn as nn
import torch.nn.functional
import torch.nn.functional as F
import warnings
import math
import os
from functools import partial
from .changeformerbase import UpsampleConvLayer, ResidualBlock, ConvLayer
from timm.models.layers import DropPath, to_2tuple, trunc_normal_ | 3,746 | nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def reset_drop_path(self, drop_path_rate):
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
cur = 0
for i in range(self.depths[0]):
self.block1[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[0]
for i in range(self.depths[1]):
self.block2[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[1]
for i in range(self.depths[2]):
self.block3[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[2]
for i in range(self.depths[3]):
self.block4[i].drop_path.drop_prob = dpr[cur + i]
def forward_features(self, x):
B = x.shape[0]
outs = []
# stage 1
x1, H1, W1 = self.patch_embed1(x)
for i, blk in enumerate(self.block1):
x1 = blk(x1, H1, W1)
x1 = self.norm1(x1)
x1 = x1.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x1)
# stage 2
x1, H1, W1 = self.patch_embed2(x1)
for i, blk in enumerate(self.block2):
x1 = blk(x1, H1, W1)
x1 = self.norm2(x1)
x1 = x1.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x1)
# stage 3
x1, H1, W1 = self.patch_embed3(x1)
for i, blk in enumerate(self.block3):
x1 = blk(x1, H1, W1)
x1 = self.norm3(x1)
x1 = x1.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x1)
# stage 4
x1, H1, W1 = self.patch_embed4(x1)
for i, blk in enumerate(self.block4):
x1 = blk(x1, H1, W1)
x1 = self.norm4(x1)
x1 = x1.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x1)
return outs
def forward(self, x):
x = self.forward_features(x)
return x
class DecoderTransformer_v3(nn.Module):
"""
Transformer Decoder
"""
def __init__(self, input_transform='multiple_select', in_index=[0, 1, 2, 3], align_corners=True,
in_channels=[32, 64, 128, 256], embedding_dim=64, output_nc=2,
decoder_softmax=False, feature_strides=[2, 4, 8, 16]):
super(DecoderTransformer_v3, self).__init__()
# assert
assert len(feature_strides) == len(in_channels)
assert min(feature_strides) == feature_strides[0]
# settings
self.feature_strides = feature_strides
self.input_transform = input_transform
self.in_index = in_index
self.align_corners = align_corners
self.in_channels = in_channels
self.embedding_dim = embedding_dim
self.output_nc = output_nc
c1_in_channels, c2_in_channels, c3_in_channels, c4_in_channels = self.in_channels
# MLP decoder heads
self.linear_c4 = MLP(input_dim=c4_in_channels, embed_dim=self.embedding_dim)
self.linear_c3 = MLP(input_dim=c3_in_channels, embed_dim=self.embedding_dim)
self.linear_c2 = MLP(input_dim=c2_in_channels, embed_dim=self.embedding_dim)
self.linear_c1 = MLP(input_dim=c1_in_channels, embed_dim=self.embedding_dim)
# convolutional Difference Modules
self.diff_c4 = conv_diff(in_channels=2 * self.embedding_dim, out_channels=self.embedding_dim)
self.diff_c3 = conv_diff(in_channels=2 * self.embedding_dim, out_channels=self.embedding_dim)
self.diff_c2 = conv_diff(in_channels=2 * self.embedding_dim, out_channels=self.embedding_dim)
self.diff_c1 = conv_diff(in_channels=2 * self.embedding_dim, out_channels=self.embedding_dim)
# taking outputs from middle of the encoder
self.make_pred_c4 = make_prediction(in_channels=self.embedding_dim, out_channels=self.output_nc)
self.make_pred_c3 = make_prediction(in_channels=self.embedding_dim, out_channels=self.output_nc)
self.make_pred_c2 = make_prediction(in_channels=self.embedding_dim, out_channels=self.output_nc)
self.make_pred_c1 = make_prediction(in_channels=self.embedding_dim, out_channels=self.output_nc)
# Final linear fusion layer
self.linear_fuse = nn.Sequential(
nn.Conv2d(in_channels=self.embedding_dim * len(in_channels), out_channels=self.embedding_dim,
kernel_size=1),
nn.BatchNorm2d(self.embedding_dim)
)
# Final predction head
|
class OverlapPatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=7, stride=4, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]
self.num_patches = self.H * self.W
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride,
padding=(patch_size[0] // 2, patch_size[1] // 2))
self.norm = nn.LayerNorm(embed_dim)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
# pdb.set_trace()
x = self.proj(x)
_, _, H, W = x.shape
x = x.flatten(2).transpose(1, 2)
x = self.norm(x)
return x, H, W
def resize(input,
size=None,
scale_factor=None,
mode='nearest',
align_corners=None,
warning=True):
if warning:
if size is not None and align_corners:
input_h, input_w = tuple(int(x) for x in input.shape[2:])
output_h, output_w = tuple(int(x) for x in size)
if output_h > input_h or output_w > output_h:
if ((output_h > 1 and output_w > 1 and input_h > 1
and input_w > 1) and (output_h - 1) % (input_h - 1)
and (output_w - 1) % (input_w - 1)):
warnings.warn(
f'When align_corners={align_corners}, '
'the output would more aligned if '
f'input size {(input_h, input_w)} is `x+1` and '
f'out size {(output_h, output_w)} is `nx+1`')
return F.interpolate(input, size, scale_factor, mode, align_corners)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.dwconv = DWConv(hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
x = self.fc1(x)
x = self.dwconv(x, H, W)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1):
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.sr_ratio = sr_ratio
if sr_ratio > 1:
self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
self.norm = nn.LayerNorm(dim)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
B, N, C = x.shape
q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
if self.sr_ratio > 1:
x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)
x_ = self.norm(x_)
kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
else:
kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
k, v = kv[0], kv[1]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Attention_dec(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1):
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.task_query = nn.Parameter(torch.randn(1, 48, dim))
self.sr_ratio = sr_ratio
if sr_ratio > 1:
self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
self.norm = nn.LayerNorm(dim)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
B, N, C = x.shape
task_q = self.task_query
# This is because we fix the task parameters to be of a certain dimension, so with varying batch size, we just stack up the same queries to operate on the entire batch
if B > 1:
task_q = task_q.unsqueeze(0).repeat(B, 1, 1, 1)
task_q = task_q.squeeze(1)
q = self.q(task_q).reshape(B, task_q.shape[1], self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
if self.sr_ratio > 1:
x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)
x_ = self.norm(x_)
kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
else:
kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
k, v = kv[0], kv[1]
q = torch.nn.functional.interpolate(q, size=(v.shape[2], v.shape[3]))
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block_dec(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention_dec(
dim,
num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
x = x + self.drop_path(self.attn(self.norm1(x), H, W))
x = x + self.drop_path(self.mlp(self.norm2(x), H, W))
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
x = x + self.drop_path(self.attn(self.norm1(x), H, W))
x = x + self.drop_path(self.mlp(self.norm2(x), H, W))
return x
class DWConv(nn.Module):
def __init__(self, dim=768):
super(DWConv, self).__init__()
self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)
def forward(self, x, H, W):
B, N, C = x.shape
x = x.transpose(1, 2).view(B, C, H, W)
x = self.dwconv(x)
x = x.flatten(2).transpose(1, 2)
return x
# Transformer Decoder
class MLP(nn.Module):
"""
Linear Embedding
"""
def __init__(self, input_dim=2048, embed_dim=768):
super().__init__()
self.proj = nn.Linear(input_dim, embed_dim)
def forward(self, x):
x = x.flatten(2).transpose(1, 2)
x = self.proj(x)
return x
# Difference module
def conv_diff(in_channels, out_channels):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
nn.ReLU(),
nn.BatchNorm2d(out_channels),
nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
nn.ReLU()
)
# Intermediate prediction module
def make_prediction(in_channels, out_channels):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
nn.ReLU(),
nn.BatchNorm2d(out_channels),
nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
)
# Transormer Ecoder with x2, x4, x8, x16 scales
class EncoderTransformer_v3(nn.Module):
def __init__(self, img_size=256, patch_size=3, in_chans=3, num_classes=2, embed_dims=[32, 64, 128, 256],
num_heads=[2, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=True, qk_scale=None, drop_rate=0.,
attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm,
depths=[3, 3, 6, 18], sr_ratios=[8, 4, 2, 1]):
super().__init__()
self.num_classes = num_classes
self.depths = depths
self.embed_dims = embed_dims
# patch embedding definitions
self.patch_embed1 = OverlapPatchEmbed(img_size=img_size, patch_size=7, stride=4, in_chans=in_chans,
embed_dim=embed_dims[0])
self.patch_embed2 = OverlapPatchEmbed(img_size=img_size // 4, patch_size=patch_size, stride=2,
in_chans=embed_dims[0],
embed_dim=embed_dims[1])
self.patch_embed3 = OverlapPatchEmbed(img_size=img_size // 8, patch_size=patch_size, stride=2,
in_chans=embed_dims[1],
embed_dim=embed_dims[2])
self.patch_embed4 = OverlapPatchEmbed(img_size=img_size // 16, patch_size=patch_size, stride=2,
in_chans=embed_dims[2],
embed_dim=embed_dims[3])
# Stage-1 (x1/4 scale)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
cur = 0
self.block1 = nn.ModuleList([Block(
dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[0])
for i in range(depths[0])])
self.norm1 = norm_layer(embed_dims[0])
# Stage-2 (x1/8 scale)
cur += depths[0]
self.block2 = nn.ModuleList([Block(
dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[1])
for i in range(depths[1])])
self.norm2 = norm_layer(embed_dims[1])
# Stage-3 (x1/16 scale)
cur += depths[1]
self.block3 = nn.ModuleList([Block(
dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[2])
for i in range(depths[2])])
self.norm3 = norm_layer(embed_dims[2])
# Stage-4 (x1/32 scale)
cur += depths[2]
self.block4 = nn.ModuleList([Block(
dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[3])
for i in range(depths[3])])
self.norm4 = norm_layer(embed_dims[3])
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def reset_drop_path(self, drop_path_rate):
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
cur = 0
for i in range(self.depths[0]):
self.block1[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[0]
for i in range(self.depths[1]):
self.block2[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[1]
for i in range(self.depths[2]):
self.block3[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[2]
for i in range(self.depths[3]):
self.block4[i].drop_path.drop_prob = dpr[cur + i]
def forward_features(self, x):
B = x.shape[0]
outs = []
# stage 1
x1, H1, W1 = self.patch_embed1(x)
for i, blk in enumerate(self.block1):
x1 = blk(x1, H1, W1)
x1 = self.norm1(x1)
x1 = x1.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x1)
# stage 2
x1, H1, W1 = self.patch_embed2(x1)
for i, blk in enumerate(self.block2):
x1 = blk(x1, H1, W1)
x1 = self.norm2(x1)
x1 = x1.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x1)
# stage 3
x1, H1, W1 = self.patch_embed3(x1)
for i, blk in enumerate(self.block3):
x1 = blk(x1, H1, W1)
x1 = self.norm3(x1)
x1 = x1.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x1)
# stage 4
x1, H1, W1 = self.patch_embed4(x1)
for i, blk in enumerate(self.block4):
x1 = blk(x1, H1, W1)
x1 = self.norm4(x1)
x1 = x1.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x1)
return outs
def forward(self, x):
x = self.forward_features(x)
return x
class DecoderTransformer_v3(nn.Module):
"""
Transformer Decoder
"""
def __init__(self, input_transform='multiple_select', in_index=[0, 1, 2, 3], align_corners=True,
in_channels=[32, 64, 128, 256], embedding_dim=64, output_nc=2,
decoder_softmax=False, feature_strides=[2, 4, 8, 16]):
super(DecoderTransformer_v3, self).__init__()
# assert
assert len(feature_strides) == len(in_channels)
assert min(feature_strides) == feature_strides[0]
# settings
self.feature_strides = feature_strides
self.input_transform = input_transform
self.in_index = in_index
self.align_corners = align_corners
self.in_channels = in_channels
self.embedding_dim = embedding_dim
self.output_nc = output_nc
c1_in_channels, c2_in_channels, c3_in_channels, c4_in_channels = self.in_channels
# MLP decoder heads
self.linear_c4 = MLP(input_dim=c4_in_channels, embed_dim=self.embedding_dim)
self.linear_c3 = MLP(input_dim=c3_in_channels, embed_dim=self.embedding_dim)
self.linear_c2 = MLP(input_dim=c2_in_channels, embed_dim=self.embedding_dim)
self.linear_c1 = MLP(input_dim=c1_in_channels, embed_dim=self.embedding_dim)
# convolutional Difference Modules
self.diff_c4 = conv_diff(in_channels=2 * self.embedding_dim, out_channels=self.embedding_dim)
self.diff_c3 = conv_diff(in_channels=2 * self.embedding_dim, out_channels=self.embedding_dim)
self.diff_c2 = conv_diff(in_channels=2 * self.embedding_dim, out_channels=self.embedding_dim)
self.diff_c1 = conv_diff(in_channels=2 * self.embedding_dim, out_channels=self.embedding_dim)
# taking outputs from middle of the encoder
self.make_pred_c4 = make_prediction(in_channels=self.embedding_dim, out_channels=self.output_nc)
self.make_pred_c3 = make_prediction(in_channels=self.embedding_dim, out_channels=self.output_nc)
self.make_pred_c2 = make_prediction(in_channels=self.embedding_dim, out_channels=self.output_nc)
self.make_pred_c1 = make_prediction(in_channels=self.embedding_dim, out_channels=self.output_nc)
# Final linear fusion layer
self.linear_fuse = nn.Sequential(
nn.Conv2d(in_channels=self.embedding_dim * len(in_channels), out_channels=self.embedding_dim,
kernel_size=1),
nn.BatchNorm2d(self.embedding_dim)
)
# Final predction head | self.convd2x = UpsampleConvLayer(self.embedding_dim, self.embedding_dim, kernel_size=4, stride=2) | 0 | 2023-10-21 09:09:57+00:00 | 8k |
pythonlessons/FinRock | experiments/training_ppo_sinusoid.py | [
{
"identifier": "PdDataFeeder",
"path": "finrock/data_feeder.py",
"snippet": "class PdDataFeeder:\n def __init__(\n self, \n df: pd.DataFrame,\n indicators: list = [],\n min: float = None,\n max: float = None\n ) -> None:\n self._df = df\n self._min = min\n self._max = max\n self._indicators = indicators\n self._cache = {}\n\n assert isinstance(self._df, pd.DataFrame) == True, \"df must be a pandas.DataFrame\"\n assert 'timestamp' in self._df.columns, \"df must have 'timestamp' column\"\n assert 'open' in self._df.columns, \"df must have 'open' column\"\n assert 'high' in self._df.columns, \"df must have 'high' column\"\n assert 'low' in self._df.columns, \"df must have 'low' column\"\n assert 'close' in self._df.columns, \"df must have 'close' column\"\n\n assert isinstance(self._indicators, list) == True, \"indicators must be an iterable\"\n assert all(isinstance(indicator, Indicator) for indicator in self._indicators) == True, \"indicators must be a list of Indicator objects\"\n\n @property\n def min(self) -> float:\n return self._min or self._df['low'].min()\n \n @property\n def max(self) -> float:\n return self._max or self._df['high'].max()\n\n def __len__(self) -> int:\n return len(self._df)\n \n def __getitem__(self, idx: int, args=None) -> State:\n # Use cache to speed up training\n if idx in self._cache:\n return self._cache[idx]\n\n indicators = []\n for indicator in self._indicators:\n results = indicator(idx)\n if results is None:\n self._cache[idx] = None\n return None\n \n indicators.append(results)\n\n data = self._df.iloc[idx]\n state = State(\n timestamp=data['timestamp'],\n open=data['open'],\n high=data['high'],\n low=data['low'],\n close=data['close'],\n volume=data.get('volume', 0.0),\n indicators=indicators\n )\n self._cache[idx] = state\n\n return state\n \n def __iter__(self) -> State:\n \"\"\" Create a generator that iterate over the Sequence.\"\"\"\n for index in range(len(self)):\n yield self[index]"
},
{
"identifier": "TradingEnv",
"path": "finrock/trading_env.py",
"snippet": "class TradingEnv:\n def __init__(\n self,\n data_feeder: PdDataFeeder,\n output_transformer: typing.Callable = None,\n initial_balance: float = 1000.0,\n max_episode_steps: int = None,\n window_size: int = 50,\n reward_function: typing.Callable = simpleReward,\n metrics: typing.List[typing.Callable] = []\n ) -> None:\n self._data_feeder = data_feeder\n self._output_transformer = output_transformer\n self._initial_balance = initial_balance\n self._max_episode_steps = max_episode_steps if max_episode_steps is not None else len(data_feeder)\n self._window_size = window_size\n self._reward_function = reward_function\n self._metrics = metrics\n\n self._observations = Observations(window_size=window_size)\n self._observation_space = np.zeros(self.reset()[0].shape)\n self.action_space = 3\n\n @property\n def observation_space(self):\n return self._observation_space\n\n def _get_obs(self, index: int, balance: float=None) -> State:\n next_state = self._data_feeder[index]\n if next_state is None:\n return None\n\n if balance is not None:\n next_state.balance = balance\n\n return next_state\n \n def _get_terminated(self):\n return False\n \n def _take_action(self, action: int, order_size: float) -> typing.Tuple[int, float]:\n # validate action is in range\n assert (action in list(range(self.action_space))) == True, f'action must be in range {self.action_space}, received: {action}'\n\n # get last state and next state\n last_state, next_state = self._observations[-2:]\n\n # modify action to hold (0) if we are out of balance\n if action == 2 and last_state.allocation_percentage == 1.0:\n action = 0\n\n # modify action to hold (0) if we are out of assets\n elif action == 1 and last_state.allocation_percentage == 0.0:\n action = 0\n\n if action == 2: # buy\n next_state.allocation_percentage = order_size\n next_state.assets = last_state.balance * order_size / last_state.close\n next_state.balance = last_state.balance - (last_state.balance * order_size)\n\n elif action == 1: # sell\n next_state.allocation_percentage = 0.0\n next_state.balance = last_state.assets * order_size * last_state.close\n next_state.assets = 0.0\n\n else: # hold\n next_state.allocation_percentage = last_state.allocation_percentage\n next_state.assets = last_state.assets\n next_state.balance = last_state.balance\n\n return action, order_size\n \n @property\n def metrics(self):\n return self._metrics\n\n def _metricsHandler(self, observation: State):\n metrics = {}\n # Loop through metrics and update\n for metric in self._metrics:\n metric.update(observation)\n metrics[metric.name] = metric.result\n\n return metrics\n\n def step(self, action: int) -> typing.Tuple[State, float, bool, bool, dict]:\n\n index = self._env_step_indexes.pop(0)\n\n observation = self._get_obs(index)\n # update observations object with new observation\n self._observations.append(observation)\n\n order_size = 1.0\n action, order_size = self._take_action(action, order_size)\n reward = self._reward_function(self._observations)\n terminated = self._get_terminated()\n truncated = False if self._env_step_indexes else True\n info = {\n \"states\": [observation],\n \"metrics\": self._metricsHandler(observation)\n }\n\n transformed_obs = self._output_transformer.transform(self._observations)\n\n if np.isnan(transformed_obs).any():\n raise ValueError(\"transformed_obs contains nan values, check your data\")\n\n return transformed_obs, reward, terminated, truncated, info\n\n def reset(self) -> typing.Tuple[State, dict]:\n \"\"\" Reset the environment and return the initial state\n \"\"\"\n size = len(self._data_feeder) - self._max_episode_steps\n self._env_start_index = np.random.randint(0, size) if size > 0 else 0\n self._env_step_indexes = list(range(self._env_start_index, self._env_start_index + self._max_episode_steps))\n\n # Initial observations are the first states of the window size\n self._observations.reset()\n while not self._observations.full:\n obs = self._get_obs(self._env_step_indexes.pop(0), balance=self._initial_balance)\n if obs is None:\n continue\n # update observations object with new observation\n self._observations.append(obs)\n\n info = {\n \"states\": self._observations.observations,\n \"metrics\": {}\n }\n \n # reset metrics with last state\n for metric in self._metrics:\n metric.reset(self._observations.observations[-1])\n\n transformed_obs = self._output_transformer.transform(self._observations)\n if np.isnan(transformed_obs).any():\n raise ValueError(\"transformed_obs contains nan values, check your data\")\n \n # return state and info\n return transformed_obs, info\n\n def render(self):\n raise NotImplementedError\n\n def close(self):\n raise NotImplementedError"
},
{
"identifier": "MinMaxScaler",
"path": "finrock/scalers.py",
"snippet": "class MinMaxScaler:\n def __init__(self, min: float, max: float):\n self._min = min\n self._max = max\n \n def transform(self, observations: Observations) -> np.ndarray:\n\n assert isinstance(observations, Observations) == True, \"observations must be an instance of Observations\"\n\n transformed_data = []\n for state in observations:\n data = []\n for name in ['open', 'high', 'low', 'close']:\n value = getattr(state, name)\n transformed_value = (value - self._min) / (self._max - self._min)\n data.append(transformed_value)\n \n data.append(state.allocation_percentage)\n\n # append scaled indicators\n for indicator in state.indicators:\n for value in indicator[\"values\"].values():\n transformed_value = (value - indicator[\"min\"]) / (indicator[\"max\"] - indicator[\"min\"])\n data.append(transformed_value)\n\n transformed_data.append(data)\n\n return np.array(transformed_data)\n \n def __call__(self, observations) -> np.ndarray:\n return self.transform(observations)"
},
{
"identifier": "simpleReward",
"path": "finrock/reward.py",
"snippet": "def simpleReward(observations: Observations) -> float:\n \n assert isinstance(observations, Observations) == True, \"observations must be an instance of Observations\"\n\n last_state, next_state = observations[-2:]\n\n # buy\n if next_state.allocation_percentage > last_state.allocation_percentage:\n # check whether it was good or bad to buy\n order_size = next_state.allocation_percentage - last_state.allocation_percentage\n reward = (next_state.close - last_state.close) / last_state.close * order_size\n\n # sell\n elif next_state.allocation_percentage < last_state.allocation_percentage:\n # check whether it was good or bad to sell\n order_size = last_state.allocation_percentage - next_state.allocation_percentage\n reward = -1 * (next_state.close - last_state.close) / last_state.close * order_size\n\n # hold\n else:\n # check whether it was good or bad to hold\n ratio = -1 if not last_state.allocation_percentage else last_state.allocation_percentage\n reward = (next_state.close - last_state.close) / last_state.close * ratio\n \n return reward"
},
{
"identifier": "DifferentActions",
"path": "finrock/metrics.py",
"snippet": "class DifferentActions(Metric):\n def __init__(self, name: str=\"different_actions\") -> None:\n super().__init__(name=name)\n\n def update(self, state: State):\n super().update(state)\n\n if not self.prev_state:\n self.prev_state = state\n else:\n if state.allocation_percentage != self.prev_state.allocation_percentage:\n self.different_actions += 1\n\n self.prev_state = state\n\n @property\n def result(self):\n return self.different_actions\n \n def reset(self, prev_state: State=None):\n super().reset(prev_state)\n\n self.prev_state = prev_state\n self.different_actions = 0"
},
{
"identifier": "AccountValue",
"path": "finrock/metrics.py",
"snippet": "class AccountValue(Metric):\n def __init__(self, name: str=\"account_value\") -> None:\n super().__init__(name=name)\n\n def update(self, state: State):\n super().update(state)\n\n self.account_value = state.account_value\n\n @property\n def result(self):\n return self.account_value\n \n def reset(self, prev_state: State=None):\n super().reset(prev_state)\n \n self.account_value = prev_state.account_value if prev_state else 0.0"
},
{
"identifier": "MaxDrawdown",
"path": "finrock/metrics.py",
"snippet": "class MaxDrawdown(Metric):\n \"\"\" The Maximum Drawdown (MDD) is a measure of the largest peak-to-trough decline in the \n value of a portfolio or investment during a specific period\n\n The Maximum Drawdown Ratio represents the proportion of the peak value that was lost during \n the largest decline. It is a measure of the risk associated with a particular investment or \n portfolio. Investors and fund managers use the Maximum Drawdown and its ratio to assess the \n historical downside risk and potential losses that could be incurred.\n \"\"\"\n def __init__(self, name: str=\"max_drawdown\") -> None:\n super().__init__(name=name)\n\n def update(self, state: State):\n super().update(state)\n\n # Use min to find the trough value\n self.max_account_value = max(self.max_account_value, state.account_value)\n\n # Calculate drawdown\n drawdown = (state.account_value - self.max_account_value) / self.max_account_value\n\n # Update max drawdown if the current drawdown is greater\n self.max_drawdown = min(self.max_drawdown, drawdown)\n\n @property\n def result(self):\n return self.max_drawdown\n \n def reset(self, prev_state: State=None):\n super().reset(prev_state)\n\n self.max_account_value = prev_state.account_value if prev_state else 0.0\n self.max_drawdown = 0.0"
},
{
"identifier": "SharpeRatio",
"path": "finrock/metrics.py",
"snippet": "class SharpeRatio(Metric):\n \"\"\" The Sharpe Ratio, is a measure of the risk-adjusted performance of an investment or a portfolio. \n It helps investors evaluate the return of an investment relative to its risk.\n\n A higher Sharpe Ratio indicates a better risk-adjusted performance. Investors and portfolio managers \n often use the Sharpe Ratio to compare the risk-adjusted returns of different investments or portfolios. \n It allows them to assess whether the additional return earned by taking on additional risk is justified.\n \"\"\"\n def __init__(self, ratio_days=365.25, name: str='sharpe_ratio'):\n self.ratio_days = ratio_days\n super().__init__(name=name)\n\n def update(self, state: State):\n super().update(state)\n time_difference_days = (state.date - self.prev_state.date).days\n if time_difference_days >= 1:\n self.daily_returns.append((state.account_value - self.prev_state.account_value) / self.prev_state.account_value)\n self.account_values.append(state.account_value)\n self.prev_state = state\n \n @property\n def result(self):\n if len(self.daily_returns) == 0:\n return 0.0\n\n mean = np.mean(self.daily_returns)\n std = np.std(self.daily_returns)\n if std == 0:\n return 0.0\n \n sharpe_ratio = mean / std * np.sqrt(self.ratio_days)\n \n return sharpe_ratio\n \n def reset(self, prev_state: State=None):\n super().reset(prev_state)\n self.prev_state = prev_state\n self.account_values = []\n self.daily_returns = []"
},
{
"identifier": "BolingerBands",
"path": "finrock/indicators.py",
"snippet": "class BolingerBands(Indicator):\n \"\"\" Volatility indicator\n\n Bollinger Bands are a type of price envelope developed by John BollingerOpens in a new window. (Price envelopes define \n upper and lower price range levels.) Bollinger Bands are envelopes plotted at a standard deviation level above and \n below a simple moving average of the price. Because the distance of the bands is based on standard deviation, they \n adjust to volatility swings in the underlying price.\n\n Bollinger Bands use 2 parameters, Period and Standard Deviations, StdDev. The default values are 20 for period, and 2 \n for standard deviations, although you may customize the combinations.\n\n Bollinger bands help determine whether prices are high or low on a relative basis. They are used in pairs, both upper\n and lower bands and in conjunction with a moving average. Further, the pair of bands is not intended to be used on its own. \n Use the pair to confirm signals given with other indicators.\n \"\"\"\n def __init__(\n self, \n data: pd.DataFrame, \n period: int=20, \n std: int=2,\n target_column: str='close',\n render_options: dict={}\n ):\n self._period = period\n self._std = std\n self._names = ['SMA', 'BB_up', 'BB_dn']\n super().__init__(data, target_column, render_options)\n\n @property\n def min(self):\n return self._data['BB_dn'].min()\n \n @property\n def max(self):\n return self._data['BB_up'].max()\n\n def compute(self):\n self._data['SMA'] = self._data[self.target_column].rolling(self._period).mean()\n self._data['BB_up'] = self._data['SMA'] + self._data[self.target_column].rolling(self._period).std() * self._std\n self._data['BB_dn'] = self._data['SMA'] - self._data[self.target_column].rolling(self._period).std() * self._std\n\n def default_render_options(self):\n return {name: RenderOptions(\n name=name,\n color=(100, 100, 255),\n window_type=WindowType.MAIN,\n render_type=RenderType.LINE,\n min=self.min,\n max=self.max\n ) for name in self._names}"
},
{
"identifier": "RSI",
"path": "finrock/indicators.py",
"snippet": "class RSI(Indicator):\n \"\"\" Momentum indicator\n\n The Relative Strength Index (RSI), developed by J. Welles Wilder, is a momentum oscillator that measures the speed and \n change of price movements. The RSI oscillates between zero and 100. Traditionally the RSI is considered overbought when \n above 70 and oversold when below 30. Signals can be generated by looking for divergences and failure swings. \n RSI can also be used to identify the general trend.\n \"\"\"\n def __init__(\n self, \n data: pd.DataFrame, \n period: int=14, \n target_column: str='close',\n render_options: dict={}\n ):\n self._period = period\n self._names = ['RSI']\n super().__init__(data, target_column, render_options)\n\n @property\n def min(self):\n return 0.0\n \n @property\n def max(self):\n return 100.0\n\n def compute(self):\n delta = self._data[self.target_column].diff()\n up = delta.clip(lower=0)\n down = -1 * delta.clip(upper=0)\n ema_up = up.ewm(com=self._period-1, adjust=True, min_periods=self._period).mean()\n ema_down = down.ewm(com=self._period-1, adjust=True, min_periods=self._period).mean()\n rs = ema_up / ema_down\n self._data['RSI'] = 100 - (100 / (1 + rs))\n\n def default_render_options(self):\n custom_options = {\n \"RSI0\": 0,\n \"RSI30\": 30,\n \"RSI70\": 70,\n \"RSI100\": 100\n }\n options = {name: RenderOptions(\n name=name,\n color=(100, 100, 255),\n window_type=WindowType.SEPERATE,\n render_type=RenderType.LINE,\n min=self.min,\n max=self.max\n ) for name in self._names}\n\n for name, value in custom_options.items():\n options[name] = RenderOptions(\n name=name,\n color=(192, 192, 192),\n window_type=WindowType.SEPERATE,\n render_type=RenderType.LINE,\n min=self.min,\n max=self.max,\n value=value\n )\n return options"
},
{
"identifier": "PSAR",
"path": "finrock/indicators.py",
"snippet": "class PSAR(Indicator):\n \"\"\" Parabolic Stop and Reverse (Parabolic SAR)\n\n The Parabolic Stop and Reverse, more commonly known as the\n Parabolic SAR,is a trend-following indicator developed by\n J. Welles Wilder. The Parabolic SAR is displayed as a single\n parabolic line (or dots) underneath the price bars in an uptrend,\n and above the price bars in a downtrend.\n\n https://school.stockcharts.com/doku.php?id=technical_indicators:parabolic_sar\n \"\"\"\n def __init__(\n self, \n data: pd.DataFrame, \n step: float=0.02, \n max_step: float=0.2,\n target_column: str='close',\n render_options: dict={}\n ):\n self._names = ['PSAR']\n self._step = step\n self._max_step = max_step\n super().__init__(data, target_column, render_options)\n\n @property\n def min(self):\n return self._data['PSAR'].min()\n \n @property\n def max(self):\n return self._data['PSAR'].max()\n\n def default_render_options(self):\n return {name: RenderOptions(\n name=name,\n color=(100, 100, 255),\n window_type=WindowType.MAIN,\n render_type=RenderType.DOT,\n min=self.min,\n max=self.max\n ) for name in self._names}\n\n def compute(self):\n high = self._data['high']\n low = self._data['low']\n close = self._data[self.target_column]\n\n up_trend = True\n acceleration_factor = self._step\n up_trend_high = high.iloc[0]\n down_trend_low = low.iloc[0]\n\n self._psar = close.copy()\n self._psar_up = pd.Series(index=self._psar.index, dtype=\"float64\")\n self._psar_down = pd.Series(index=self._psar.index, dtype=\"float64\")\n\n for i in range(2, len(close)):\n reversal = False\n\n max_high = high.iloc[i]\n min_low = low.iloc[i]\n\n if up_trend:\n self._psar.iloc[i] = self._psar.iloc[i - 1] + (\n acceleration_factor * (up_trend_high - self._psar.iloc[i - 1])\n )\n\n if min_low < self._psar.iloc[i]:\n reversal = True\n self._psar.iloc[i] = up_trend_high\n down_trend_low = min_low\n acceleration_factor = self._step\n else:\n if max_high > up_trend_high:\n up_trend_high = max_high\n acceleration_factor = min(\n acceleration_factor + self._step, self._max_step\n )\n\n low1 = low.iloc[i - 1]\n low2 = low.iloc[i - 2]\n if low2 < self._psar.iloc[i]:\n self._psar.iloc[i] = low2\n elif low1 < self._psar.iloc[i]:\n self._psar.iloc[i] = low1\n else:\n self._psar.iloc[i] = self._psar.iloc[i - 1] - (\n acceleration_factor * (self._psar.iloc[i - 1] - down_trend_low)\n )\n\n if max_high > self._psar.iloc[i]:\n reversal = True\n self._psar.iloc[i] = down_trend_low\n up_trend_high = max_high\n acceleration_factor = self._step\n else:\n if min_low < down_trend_low:\n down_trend_low = min_low\n acceleration_factor = min(\n acceleration_factor + self._step, self._max_step\n )\n\n high1 = high.iloc[i - 1]\n high2 = high.iloc[i - 2]\n if high2 > self._psar.iloc[i]:\n self._psar[i] = high2\n elif high1 > self._psar.iloc[i]:\n self._psar.iloc[i] = high1\n\n up_trend = up_trend != reversal # XOR\n\n if up_trend:\n self._psar_up.iloc[i] = self._psar.iloc[i]\n else:\n self._psar_down.iloc[i] = self._psar.iloc[i]\n\n # calculate psar indicator\n self._data['PSAR'] = self._psar"
},
{
"identifier": "SMA",
"path": "finrock/indicators.py",
"snippet": "class SMA(Indicator):\n \"\"\" Trend indicator\n\n A simple moving average (SMA) calculates the average of a selected range of prices, usually closing prices, by the number \n of periods in that range.\n\n The SMA is a technical indicator for determining if an asset price will continue or reverse a bull or bear trend. It is \n calculated by summing up the closing prices of a stock over time and then dividing that total by the number of time periods \n being examined. Short-term averages respond quickly to changes in the price of the underlying, while long-term averages are \n slow to react.\n\n https://www.investopedia.com/terms/s/sma.asp\n \"\"\"\n def __init__(\n self, \n data: pd.DataFrame, \n period: int=20, \n target_column: str='close',\n render_options: dict={}\n ):\n self._period = period\n self._names = [f'SMA{period}']\n super().__init__(data, target_column, render_options)\n\n @property\n def min(self):\n return self._data[self.names[0]].min()\n \n @property\n def max(self):\n return self._data[self.names[0]].max()\n \n def default_render_options(self):\n return {name: RenderOptions(\n name=name,\n color=(100, 100, 255),\n window_type=WindowType.MAIN,\n render_type=RenderType.LINE,\n min=self.min,\n max=self.max\n ) for name in self._names}\n\n def compute(self):\n self._data[self.names[0]] = self._data[self.target_column].rolling(self._period).mean()"
}
] | import numpy as np
import pandas as pd
import tensorflow as tf
from keras import layers, models
from finrock.data_feeder import PdDataFeeder
from finrock.trading_env import TradingEnv
from finrock.scalers import MinMaxScaler
from finrock.reward import simpleReward
from finrock.metrics import DifferentActions, AccountValue, MaxDrawdown, SharpeRatio
from finrock.indicators import BolingerBands, RSI, PSAR, SMA
from rockrl.utils.misc import MeanAverage
from rockrl.utils.memory import Memory
from rockrl.tensorflow import PPOAgent | 6,517 | tf.get_logger().setLevel('ERROR')
for gpu in tf.config.experimental.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
df = pd.read_csv('Datasets/random_sinusoid.csv')
df = df[:-1000] # leave 1000 for testing
pd_data_feeder = PdDataFeeder(
df,
indicators = [
BolingerBands(data=df, period=20, std=2),
RSI(data=df, period=14),
| tf.get_logger().setLevel('ERROR')
for gpu in tf.config.experimental.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
df = pd.read_csv('Datasets/random_sinusoid.csv')
df = df[:-1000] # leave 1000 for testing
pd_data_feeder = PdDataFeeder(
df,
indicators = [
BolingerBands(data=df, period=20, std=2),
RSI(data=df, period=14), | PSAR(data=df), | 10 | 2023-10-23 07:44:54+00:00 | 8k |
hitlic/deepepochs | deepepochs/callbacks/interprete.py | [
{
"identifier": "Callback",
"path": "deepepochs/callbacks/callback.py",
"snippet": "class Callback:\n \"\"\"\n 所有Callback的基类。\n\n 方法执行流程:\n on_before_fit\n on_before_epoch\n on_before_train_epochs # 多个训练任务\n on_before_train_epoch\n on_before_train_batch\n on_before_train_forward\n on_after_train_forward\n # 累积梯度时重复多次\n on_before_backward\n on_after_backward\n # Accelerate累积梯度时重复多次\n on_before_optimize\n on_after_optimize\n on_train_metrics\n on_after_train_batch\n ...\n on_after_train_epoch\n ...\n on_after_train_epochs\n on_before_val_epochs # 多个验证任务\n on_before_val_epoch\n on_before_val_batch\n on_before_val_forward\n on_after_val_forward\n on_val_metrics\n on_after_val_batch\n ...\n on_after_val_epoch\n ...\n on_after_val_epochs\n on_after_epoch\n ...\n on_after_fit\n on_before_test_epochs # 多个测试任务\n on_before_test_epoch\n on_before_test_batch\n on_before_test_forward\n on_after_test_forward\n on_test_metrics\n on_after_test_batch\n ...\n on_after_test_epoch\n ...\n on_after_test_epochs\n \"\"\"\n def __init__(self, priority=1):\n \"\"\"\n Args:\n priority: 任意数值。Callback的优先级,priority值越大before方法越先执行,after方法越后执行。\n 默认取值为时间,即以创建时间为优先级。\n \"\"\"\n self.priority = priority * time.time()\n\n def on_before_fit(self, trainer, epochs):\n \"\"\"\n Args:\n trainer: Trainer\n epochs: 训练总epochs数\n \"\"\"\n\n def on_before_epoch(self, trainer, train_tasks, val_tasks, epoch_idx):\n \"\"\"\n Args:\n trainer: Trainer\n train_task: 训练任务\n val_tasks: 验证任务列表\n epoch_idx: 当前训练的epoch index\n \"\"\"\n\n def on_before_train_epochs(self, trainer, tasks, epoch_idx):\n \"\"\"\n Args:\n trainer: Trainer\n tasks: 训练任务列表\n epoch_idx: 当前训练的epoch index\n \"\"\"\n\n def on_before_train_epoch(self, trainer, task):\n \"\"\"\n Args:\n trainer: Trainer\n task: 训练任务\n total_batchs: mini-batch总数\n \"\"\"\n\n def on_before_train_batch(self, trainer, batch_x, batch_y, batch_idx):\n \"\"\"\n Args:\n trainer: Trainer\n batch_x: 当前训练batch模型的输入数据\n batch_y: 当前训练batch的标签\n batch_idx: 当前训练batch index\n \"\"\"\n\n def on_before_train_forward(self, trainer):\n \"\"\"\n Args:\n trainer: Trainer\n \"\"\"\n\n def on_after_train_forward(self, trainer, model_out):\n \"\"\"\n Args:\n trainer: Trainer\n model_out: 模型前向预测输出\n \"\"\"\n\n def on_before_backward(self, trainer, loss):\n \"\"\"\n *累积梯度训练时会重复多次\n Args:\n trainer: Trainer\n loss: loss\n \"\"\"\n\n def on_after_backward(self, trainer, loss):\n \"\"\"\n *累积梯度训练时会重复多次\n Args:\n trainer: Trainer\n loss: loss\n \"\"\"\n\n def on_before_optimize(self, trainer):\n \"\"\"\n *Accelerate累积梯度训练时会重复多次\n Args:\n trainer: Trainer\n \"\"\"\n\n def on_after_optimize(self, trainer):\n \"\"\"\n *Accelerate累积梯度训练时会重复多次\n Args:\n trainer: Trainer\n \"\"\"\n\n def on_train_metrics(self, trainer, loss, model_out, batch_y, task):\n \"\"\"\n Args:\n trainer: Trainer\n loss: 当前训练batch的损失\n model_out: 模型前向预测输出\n batch_y: 当前训练batch的标签\n task: 当前的EpochTask\n \"\"\"\n\n def on_after_train_batch(self, trainer, metrics, batch_idx):\n \"\"\"\n Args:\n trainer: Trainer\n metrics: 当前batch的训练指标值字典\n batch_idx: 当前batch index\n \"\"\"\n\n def on_after_train_epoch(self, trainer, task, metrics):\n \"\"\"\n Args:\n trainer: Trainer\n task: 训练任务\n metrics: 当前epoch的训练指标值字典\n \"\"\"\n\n def on_after_train_epochs(self, trainer, tasks, metrics, epoch_idx):\n \"\"\"\n Args:\n trainer: Trainer\n tasks: 训练任务列表\n metrics: 训练epoch的验证指标值字典\n epoch_idx: epoch_idx: 当前训练的epoch index\n \"\"\"\n\n def on_before_val_epochs(self, trainer, tasks, epoch_idx):\n \"\"\"\n Args:\n trainer: Trainer\n tasks: 验证任务列表\n epoch_idx: 当前训练的epoch index\n \"\"\"\n\n def on_before_val_epoch(self, trainer, task):\n \"\"\"\n Args:\n trainer: Trainer\n total_batchs: mini-batch总数\n \"\"\"\n\n def on_before_val_batch(self, trainer, batch_x, batch_y, batch_idx):\n \"\"\"\n Args:\n trainer: Trainer\n batch_x: 当前验证batch模型的输入数据\n batch_y: 当前验证batch的标签\n batch_idx: 当前验证batch index\n \"\"\"\n\n def on_before_val_forward(self, trainer):\n \"\"\"\n Args:\n trainer: Trainer\n \"\"\"\n\n def on_after_val_forward(self, trainer, model_out):\n \"\"\"\n Args:\n trainer: Trainer\n model_out: 模型前向预测输出\n \"\"\"\n\n def on_val_metrics(self, trainer, loss, model_out, batch_y, task):\n \"\"\"\n Args:\n trainer: Trainer\n loss: 当前batch的损失\n model_out: 模型前向预测输出\n batch_y: 当前验证batch的标签\n task: 当前的EpochTask\n \"\"\"\n\n def on_after_val_batch(self, trainer, metrics, batch_idx):\n \"\"\"\n Args:\n trainer: Trainer\n metrics: 当前验证batch的指标值字典\n batch_idx: 当前验证batch index\n \"\"\"\n\n def on_after_val_epoch(self, trainer, task, metrics):\n \"\"\"\n Args:\n trainer: Trainer\n metrics: 验证epoch的验证指标值字典\n \"\"\"\n\n def on_after_val_epochs(self, trainer, tasks, metrics, epoch_idx):\n \"\"\"\n Args:\n trainer: Trainer\n metrics: 验证epoch的验证指标值字典\n epoch_idx: epoch_idx: 当前训练的epoch index\n \"\"\"\n\n def on_after_epoch(self, trainer, train_tasks, val_tasks, train_metrics, val_metrics, epoch_idx):\n \"\"\"\n Args:\n trainer: Trainer\n train_metrics: 当前epoch的训练指标字典\n val_metrics: 当前epoch的验证指标字典\n epoch_idx: 当前训练epoch index\n \"\"\"\n\n def on_after_fit(self, trainer):\n \"\"\"\n Args:\n trainer: Trainer\n \"\"\"\n\n def on_before_test_epochs(self, trainer, tasks):\n \"\"\"\n Args:\n trainer: Trainer\n tasks: 测试任务列表\n \"\"\"\n\n def on_before_test_epoch(self, trainer, task):\n \"\"\"\n Args:\n trainer: Trainer\n total_batchs: mini-batch总数\n \"\"\"\n\n def on_before_test_batch(self, trainer, batch_x, batch_y, batch_idx):\n \"\"\"\n Args:\n trainer: Trainer\n batch_x: 当前测试batch模型的输入数据\n batch_y: 当前测试batch的标签\n batch_idx: 当前测试batch index\n \"\"\"\n\n def on_before_test_forward(self, trainer):\n \"\"\"\n Args:\n trainer: Trainer\n \"\"\"\n\n def on_after_test_forward(self, trainer, model_out):\n \"\"\"\n Args:\n trainer: Trainer\n model_out: 模型前向预测输出\n \"\"\"\n\n def on_test_metrics(self, trainer, loss, model_out, batch_y, task):\n \"\"\"\n Args:\n trainer: Trainer\n loss: 当前batch的损失\n model_out: 模型前向预测输出\n batch_y: 当前测试batch的标签\n task: 当前的EpochTask\n \"\"\"\n\n def on_after_test_batch(self, trainer, metrics, batch_idx):\n \"\"\"\n Args:\n trainer: Trainer\n metrics: 当前测试batch的指标值字典\n batch_idx: 当前测试batch index\n \"\"\"\n\n def on_after_test_epoch(self, trainer, task, metrics):\n \"\"\"\n Args:\n trainer: Trainer\n metrics: 测试epoch的指标值字典\n \"\"\"\n\n def on_after_test_epochs(self, trainer, tasks, metrics):\n \"\"\"\n Args:\n trainer: Trainer\n tasks: 测试任务列表\n metrics: 测试epochs的验证指标值字典\n \"\"\""
},
{
"identifier": "run_tensorboard",
"path": "deepepochs/callbacks/log.py",
"snippet": "def run_tensorboard(self):\n run_tensorboard(self.log_dir)"
},
{
"identifier": "plot_confusion",
"path": "deepepochs/tools.py",
"snippet": "def plot_confusion(c_matrix, class_num, class_names=None,\n norm_dec=2, cmap='Blues', info=''):\n \"\"\"\n 画出混淆矩阵。\n Args:\n c_matrix: 混淆矩阵\n class_num: 类别数量\n class_names: 各类名称,可选参数\n norm_dec: 标准化保留小数点位数\n cmap: 配色方案\n info: 显示在图像标题中的其他信息\n \"\"\"\n title = 'Confusion matrix'\n\n data_size = c_matrix.sum()\n c_matrix = c_matrix.astype('int')\n\n fig = plt.figure()\n\n plt.imshow(c_matrix, interpolation='nearest', cmap=cmap)\n plt.title(f'{title} - ({data_size}) \\n{info}')\n if class_names and len(class_names) == class_num:\n tick_marks = np.arange(class_num)\n plt.xticks(tick_marks, class_names, rotation=90)\n plt.yticks(tick_marks, class_names, rotation=0)\n\n thresh = c_matrix.max() / 2.\n for i, j in itertools.product(range(c_matrix.shape[0]), range(c_matrix.shape[1])):\n coeff = f'{c_matrix[i, j]}'\n plt.text(j, i, coeff, horizontalalignment=\"center\", verticalalignment=\"center\",\n color=\"yellow\" if c_matrix[i, j] > thresh else \"green\")\n\n ax = fig.gca()\n ax.set_ylim(class_num-.5, -.5)\n\n ax.xaxis.set_major_locator(MultipleLocator(1))\n ax.yaxis.set_major_locator(MultipleLocator(1))\n\n plt.ylabel('Target')\n plt.xlabel('Prediction')\n plt.grid(False)\n # plt.tight_layout()\n fig.subplots_adjust(bottom=0.15)\n return fig"
},
{
"identifier": "TopKQueue",
"path": "deepepochs/tools.py",
"snippet": "class TopKQueue(PriorityQueue):\n \"\"\"\n 能够保存最大k个值的优先队列\n \"\"\"\n def __init__(self, k: int = 0):\n super().__init__(maxsize=k)\n\n def put(self, e):\n if self.full():\n if e[0] > self.queue[0][0]:\n self.get()\n else:\n return\n super().put(e)\n\n def items(self):\n return sorted(self.queue, key=lambda e: e[0], reverse=True)"
},
{
"identifier": "confusion_matrix",
"path": "deepepochs/metrics.py",
"snippet": "@lru_cache(maxsize=1)\ndef confusion_matrix(preds, targets, num_classes):\n \"\"\"\n Args:\n preds: 预测向量,可为binary或多维概率分布\n targets: 标签向量,可为one-hot或非one-hot的\n num_class: 类别数量\n \"\"\"\n if (preds.dim()==1 or preds.shape[-1]==1) and num_classes==2: # 当预测为binary时\n preds = preds.unsqueeze(-1) if preds.dim()==1 else preds\n preds = torch.concat([1-preds, preds], dim=-1)\n preds = preds.argmax(dim=-1).flatten().int()\n\n if targets.dim() > 1 and targets.shape[-1] > 1: # 当targets为one-hot时\n targets = targets.argmax(dim=1).int()\n else:\n targets = targets.flatten().int()\n cm = torch.zeros([num_classes, num_classes], dtype=preds.dtype, device=preds.device)\n one = torch.tensor([1], dtype=preds.dtype, device=preds.device)\n return cm.index_put_((targets, preds), one, accumulate=True)"
},
{
"identifier": "get_class_num",
"path": "deepepochs/metrics.py",
"snippet": "def get_class_num(preds, targets):\n \"\"\"获取类别数量\"\"\"\n if isinstance(preds, (list, tuple)):\n preds = preds[0]\n if preds.shape[1] == 1:\n num_classes = int((max(targets) + 1).item())\n else:\n num_classes = preds.shape[1]\n return num_classes"
},
{
"identifier": "check_path",
"path": "deepepochs/loops.py",
"snippet": "def check_path(path, create=True):\r\n \"\"\"检查路径是否存在\"\"\"\r\n if not osp.exists(path):\r\n if create:\r\n os.makedirs(path)\r\n else:\r\n raise ValueError(f'Path \"{path}\" does not exists!')\r\n return path\r"
}
] | from torch.utils.tensorboard import SummaryWriter
from os import path as osp
from matplotlib import pyplot as plt
from .callback import Callback
from .log import run_tensorboard
from ..tools import plot_confusion, TopKQueue
from ..metrics import confusion_matrix, get_class_num
from ..loops import check_path | 4,198 |
class InterpreteCallback(Callback):
def __init__(self, metric=None, k=100, mode='max', stages=('train', 'val', 'test'), class_num=None, log_dir='./logs', image_data=False):
"""
Args:
metric: none reducted callable
k: number of samples to keep
mode: 'max' or 'min'
stages: 'train' 'val' or 'test'
class_num: 类别数量
log_dir: 日志存储路径
image_data: 数据是否是图片(如果是则在tensorboard中保存图片)
"""
super().__init__()
assert mode in ['min', 'max']
stages = stages if isinstance(stages, (list, tuple)) else [stages]
assert all(s in ['train', 'val', 'test'] for s in stages ), 'stages的值为 train、val、test或者其组合'
self.metric = metric
self.stages = stages
self.mode = mode
self.batch_recorder = []
self.top_queue = TopKQueue(k=k)
self.confusion_matrix = None
self.class_num = class_num
self.log_dir = log_dir
self.image_data = image_data
def on_before_fit(self, trainer, epochs):
log_dir = osp.join(self.log_dir, trainer.running_id)
check_path(log_dir)
logger = getattr(trainer, 'logger', None)
if logger is None:
self.logger = SummaryWriter(log_dir=log_dir)
trainer.logger = self.logger
else:
self.logger = logger
def on_before_train_epochs(self, trainer, tasks, epoch_idx):
self.confusion_matrix=None
def on_before_val_epochs(self, trainer, tasks, epoch_idx):
self.confusion_matrix=None
def on_before_test_epochs(self, trainer, tasks):
self.confusion_matrix=None
def on_before_train_batch(self, trainer, batch_x, batch_y, batch_idx):
if self.class_num is None:
|
class InterpreteCallback(Callback):
def __init__(self, metric=None, k=100, mode='max', stages=('train', 'val', 'test'), class_num=None, log_dir='./logs', image_data=False):
"""
Args:
metric: none reducted callable
k: number of samples to keep
mode: 'max' or 'min'
stages: 'train' 'val' or 'test'
class_num: 类别数量
log_dir: 日志存储路径
image_data: 数据是否是图片(如果是则在tensorboard中保存图片)
"""
super().__init__()
assert mode in ['min', 'max']
stages = stages if isinstance(stages, (list, tuple)) else [stages]
assert all(s in ['train', 'val', 'test'] for s in stages ), 'stages的值为 train、val、test或者其组合'
self.metric = metric
self.stages = stages
self.mode = mode
self.batch_recorder = []
self.top_queue = TopKQueue(k=k)
self.confusion_matrix = None
self.class_num = class_num
self.log_dir = log_dir
self.image_data = image_data
def on_before_fit(self, trainer, epochs):
log_dir = osp.join(self.log_dir, trainer.running_id)
check_path(log_dir)
logger = getattr(trainer, 'logger', None)
if logger is None:
self.logger = SummaryWriter(log_dir=log_dir)
trainer.logger = self.logger
else:
self.logger = logger
def on_before_train_epochs(self, trainer, tasks, epoch_idx):
self.confusion_matrix=None
def on_before_val_epochs(self, trainer, tasks, epoch_idx):
self.confusion_matrix=None
def on_before_test_epochs(self, trainer, tasks):
self.confusion_matrix=None
def on_before_train_batch(self, trainer, batch_x, batch_y, batch_idx):
if self.class_num is None: | self.class_num = get_class_num(batch_x, batch_y) | 5 | 2023-10-19 05:41:48+00:00 | 8k |
Beautifuldog01/AcademicDocumentClassifier_without_AllenNLP | main_meta_CNN.py | [
{
"identifier": "TextClassifier",
"path": "model.py",
"snippet": "class TextClassifier(nn.Module):\n def __init__(self, vocab_size, embedding_dim):\n super(TextClassifier, self).__init__()\n self.embedding = nn.Embedding(vocab_size, embedding_dim * 6)\n self.title_cnn_3 = nn.Conv1d(embedding_dim * 6, embedding_dim, 3, padding=1)\n self.title_cnn_5 = nn.Conv1d(embedding_dim * 6, embedding_dim, 5, padding=2)\n self.abstract_cnn_3 = nn.Conv1d(embedding_dim * 6, embedding_dim * 2, 3, padding=1)\n self.abstract_cnn_5 = nn.Conv1d(embedding_dim * 6, embedding_dim * 2, 5, padding=2)\n self.classifier = nn.Linear(embedding_dim * 6, 1)\n \n def forward(self, title, abstract):\n title_embed = self.embedding(title)\n abstract_embed = self.embedding(abstract)\n title_features_3 = F.relu(self.title_cnn_3(title_embed.permute(0, 2, 1)))\n title_features_5 = F.relu(self.title_cnn_5(title_embed.permute(0, 2, 1)))\n abstract_features_3 = F.relu(\n self.abstract_cnn_3(abstract_embed.permute(0, 2, 1))\n )\n abstract_features_5 = F.relu(\n self.abstract_cnn_5(abstract_embed.permute(0, 2, 1))\n )\n title_features_3 = F.max_pool1d(\n title_features_3, title_features_3.size(2)\n ).squeeze(2)\n title_features_5 = F.max_pool1d(\n title_features_5, title_features_5.size(2)\n ).squeeze(2)\n abstract_features_3 = F.max_pool1d(\n abstract_features_3, abstract_features_3.size(2)\n ).squeeze(2)\n abstract_features_5 = F.max_pool1d(\n abstract_features_5, abstract_features_5.size(2)\n ).squeeze(2)\n title_features = torch.cat([title_features_3, title_features_5], dim=1)\n abstract_features = torch.cat([abstract_features_3, abstract_features_5], dim=1)\n combined_features = torch.cat([title_features, abstract_features], dim=1)\n output = self.classifier(combined_features)\n return output"
},
{
"identifier": "NonNegativePULoss",
"path": "model.py",
"snippet": "class NonNegativePULoss(nn.Module):\n def __init__(self, prior, positive_class=1, loss=None, gamma=1, beta=0, nnpu=True):\n super(NonNegativePULoss, self).__init__()\n self.prior = prior\n self.gamma = gamma\n self.beta = beta\n self.loss = loss or (lambda x: torch.sigmoid(-x))\n self.nnPU = nnpu\n self.positive = positive_class\n self.unlabeled = 1 - positive_class\n\n def forward(self, x, t):\n t = t[:, None]\n positive, unlabeled = (t == self.positive).float(), (\n t == self.unlabeled\n ).float()\n n_positive, n_unlabeled = max(1.0, positive.sum().item()), max(\n 1.0, unlabeled.sum().item()\n )\n\n y_positive = self.loss(x) # per sample positive risk\n y_unlabeled = self.loss(-x) # per sample negative risk\n\n positive_risk = torch.sum(self.prior * positive / n_positive * y_positive)\n negative_risk = torch.sum(\n (unlabeled / n_unlabeled - self.prior * positive / n_positive) * y_unlabeled\n )\n\n if self.nnPU:\n if negative_risk.item() < -self.beta:\n objective = (\n positive_risk - self.beta + self.gamma * negative_risk\n ).detach() - self.gamma * negative_risk\n else:\n objective = positive_risk + negative_risk\n else:\n objective = positive_risk + negative_risk\n\n return objective"
},
{
"identifier": "make_PU_meta",
"path": "dataset_pubmed.py",
"snippet": "def make_PU_meta(tr, ca, ts):\n tr_df = load_and_transform_data(tr, tr=1)\n ca_df = load_and_transform_data(ca, ca=1)\n ts_df = load_and_transform_data(ts, ts=1)\n\n all_df = pd.concat([tr_df, ca_df, ts_df]).reset_index(drop=True)\n\n for label, df in [(\"Training\", tr_df), (\"Valid\", ca_df), (\"Test\", ts_df)]:\n print(f\"Size of the {label} DataFrame: {df.shape}\")\n print(\n f\"Counts of true labels in {label.lower()} set: {df['label'].value_counts()}\"\n )\n print(\n f\"Counts of pu labels in {label.lower()} set: {df['pulabel'].value_counts()}\"\n )\n print()\n\n return all_df"
},
{
"identifier": "BiDataset",
"path": "dataset_pubmed.py",
"snippet": "class BiDataset(torch.utils.data.Dataset):\n def __init__(self, data, label):\n self.data = data\n self.label = label\n\n def __getitem__(self, index):\n return self.data[index], self.label[index]\n\n def __len__(self):\n return len(self.data)\n\n def normalization(data):\n _range = np.max(data) - np.min(data)\n return (data - np.min(data)) / _range"
},
{
"identifier": "BalancedBatchSampler",
"path": "dataset_pubmed.py",
"snippet": "class BalancedBatchSampler(Sampler):\n def __init__(self, dataset, batch_size, positive_ratio=0.5):\n self.dataset = dataset\n self.batch_size = batch_size\n self.positive_ratio = positive_ratio\n self.all_positive_indices = [\n i for i, label in enumerate(self.dataset.label) if label == 1\n ]\n self.all_negative_indices = [\n i for i, label in enumerate(self.dataset.label) if label == 0\n ]\n\n def __iter__(self):\n total_batches = len(self.dataset) // self.batch_size\n num_positive_per_batch = int(self.batch_size * self.positive_ratio)\n num_negative_per_batch = self.batch_size - num_positive_per_batch\n\n for i in range(total_batches):\n positive_indices = random.choices(\n self.all_positive_indices, k=num_positive_per_batch\n )\n negative_indices = random.choices(\n self.all_negative_indices, k=num_negative_per_batch\n )\n batch_indices = positive_indices + negative_indices\n random.shuffle(batch_indices)\n\n for index in batch_indices:\n yield index\n\n def __len__(self):\n return len(self.dataset) // self.batch_size"
},
{
"identifier": "ProportionalSampler",
"path": "dataset_pubmed.py",
"snippet": "class ProportionalSampler(Sampler):\n def __init__(self, dataset, batch_size, num_cycles):\n self.dataset = dataset\n self.batch_size = batch_size\n self.num_cycles = num_cycles\n\n self.all_positive_indices = [\n i for i, label in enumerate(self.dataset.label) if label == 1\n ]\n self.all_negative_indices = [\n i for i, label in enumerate(self.dataset.label) if label == 0\n ]\n\n self.total_instances = len(self.all_positive_indices) + len(\n self.all_negative_indices\n )\n\n def __iter__(self):\n total_batches = len(self.dataset) // self.batch_size\n smaller_class_len = min(\n len(self.all_positive_indices), len(self.all_negative_indices)\n )\n\n # Calculate the number of positive samples per batch based on the ratio in the dataset\n num_positive_per_batch = max(\n 1, round((smaller_class_len / self.total_instances) * self.batch_size)\n )\n num_negative_per_batch = self.batch_size - num_positive_per_batch\n\n # Backup for reusing samples from the smaller class\n positive_backup = list(self.all_positive_indices)\n negative_backup = list(self.all_negative_indices)\n\n # Counter for the number of cycles the smaller class has been through\n cycle_counter = self.num_cycles\n\n for i in range(total_batches):\n # Replenish the smaller class samples if necessary\n if num_positive_per_batch > len(self.all_positive_indices):\n random.shuffle(positive_backup)\n self.all_positive_indices += positive_backup\n cycle_counter -= 1\n\n if num_negative_per_batch > len(self.all_negative_indices):\n random.shuffle(negative_backup)\n self.all_negative_indices += negative_backup\n cycle_counter -= 1\n\n if cycle_counter == 0:\n break\n\n # Create a balanced batch\n num_positive_per_batch = min(\n num_positive_per_batch, len(self.all_positive_indices)\n )\n if num_positive_per_batch > 0:\n positive_indices = random.sample(\n self.all_positive_indices, num_positive_per_batch\n )\n self.all_positive_indices = [\n x for x in self.all_positive_indices if x not in positive_indices\n ]\n\n num_negative_per_batch = min(\n num_negative_per_batch, len(self.all_negative_indices)\n )\n if num_negative_per_batch > 0:\n negative_indices = random.sample(\n self.all_negative_indices, num_negative_per_batch\n )\n self.all_negative_indices = [\n x for x in self.all_negative_indices if x not in negative_indices\n ]\n\n batch_indices = positive_indices + negative_indices\n random.shuffle(batch_indices)\n\n for index in batch_indices:\n yield index\n\n def __len__(self):\n return len(self.dataset) // self.batch_size"
},
{
"identifier": "set_seed",
"path": "utils.py",
"snippet": "def set_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False"
},
{
"identifier": "build_vocab",
"path": "utils.py",
"snippet": "def build_vocab(texts, min_freq=2):\n \"\"\"\n Build vocabulary from a list of texts\n \"\"\"\n tokenized_texts = [word_tokenize(text.lower()) for text in texts]\n counter = Counter(itertools.chain.from_iterable(tokenized_texts))\n\n vocab = {\n word: i + 2\n for i, (word, freq) in enumerate(counter.items())\n if freq >= min_freq\n }\n\n vocab[\"<PAD>\"] = 0\n vocab[\"<UNK>\"] = 1\n\n return vocab"
},
{
"identifier": "getFeatures",
"path": "utils.py",
"snippet": "def getFeatures(data, word_to_index, max_length):\n all_features = []\n\n for index in range(len(data)):\n title_tokens = data.title[index].split()\n abstract_tokens = data.abstract[index].split()\n\n # Convert words to indices\n title_indices = [word_to_index.get(word.lower(), 0) for word in title_tokens]\n abstract_indices = [\n word_to_index.get(word.lower(), 0) for word in abstract_tokens\n ]\n\n # Pad or truncate\n title_indices += [0] * (max_length - len(title_indices))\n title_indices = title_indices[:max_length]\n abstract_indices += [0] * (max_length - len(abstract_indices))\n abstract_indices = abstract_indices[:max_length]\n\n all_features.append((title_indices, abstract_indices))\n\n return all_features"
},
{
"identifier": "get_metric",
"path": "utils.py",
"snippet": "def get_metric(labels, prob, threshold99=None):\n # Move tensors to CPU if they are on GPU\n labels = (\n labels.cpu() if isinstance(labels, torch.Tensor) and labels.is_cuda else labels\n )\n prob = prob.cpu() if isinstance(prob, torch.Tensor) and prob.is_cuda else prob\n\n # Convert to NumPy if they are tensors\n labels = labels.numpy() if isinstance(labels, torch.Tensor) else labels\n prob = prob.numpy() if isinstance(prob, torch.Tensor) else prob\n auc = roc_auc_score(labels, prob)\n r_10, r_20, r_30, r_40, r_50, r_95 = get_rec(labels, prob)\n\n p_mean = np.mean(prob[labels == 1])\n n_mean = np.mean(prob[labels == 0])\n\n prob = torch.tensor(prob)\n threshold = get_threshold_cqy(prob, labels, n_mean, p_mean, step=0.001)\n threshold99 = get_threshold_99(prob, labels)\n preds = torch.gt(prob, threshold).float()\n preds99 = torch.gt(prob, threshold99).float()\n f1, rec = only_positive(labels, preds)\n acc = accuracy_score(labels, preds)\n\n f1_99, rec_99 = only_positive(labels, preds99)\n acc_99 = accuracy_score(labels, preds99)\n\n reduce_work = get_reduction(preds99, labels)\n\n return (\n threshold,\n threshold99,\n auc,\n f1,\n acc,\n rec,\n f1_99,\n acc_99,\n rec_99,\n r_10,\n r_20,\n r_30,\n r_40,\n r_50,\n r_95,\n reduce_work,\n p_mean,\n n_mean,\n )"
},
{
"identifier": "log_metrics",
"path": "utils.py",
"snippet": "def log_metrics(writer, phase, metrics, epoch):\n \"\"\"\n Log metrics using TensorBoard.\n \"\"\"\n (\n threshold,\n threshold99,\n auc,\n f1,\n acc,\n rec,\n f1_99,\n acc_99,\n rec_99,\n r_10,\n r_20,\n r_30,\n r_40,\n r_50,\n r_95,\n reduce_work,\n p_mean,\n n_mean,\n ) = metrics\n\n writer.add_scalar(f\"{phase}/AUC\", auc, epoch)\n writer.add_scalar(f\"{phase}/F1\", f1, epoch)\n writer.add_scalar(f\"{phase}/Accuracy\", acc, epoch)\n writer.add_scalar(f\"{phase}/Recall\", rec, epoch)\n writer.add_scalar(f\"{phase}/F1_99\", f1_99, epoch)\n writer.add_scalar(f\"{phase}/Accuracy_99\", acc_99, epoch)\n writer.add_scalar(f\"{phase}/Recall_99\", rec_99, epoch)\n writer.add_scalar(f\"{phase}/R_10\", r_10, epoch)\n writer.add_scalar(f\"{phase}/R_20\", r_20, epoch)\n writer.add_scalar(f\"{phase}/R_30\", r_30, epoch)\n writer.add_scalar(f\"{phase}/R_40\", r_40, epoch)\n writer.add_scalar(f\"{phase}/R_50\", r_50, epoch)\n writer.add_scalar(f\"{phase}/R_95\", r_95, epoch)\n writer.add_scalar(f\"{phase}/Reduce_Work\", reduce_work, epoch)\n writer.add_scalar(f\"{phase}/Positive_Mean\", p_mean, epoch)\n writer.add_scalar(f\"{phase}/Negative_Mean\", n_mean, epoch)"
},
{
"identifier": "print_info",
"path": "utils.py",
"snippet": "def print_info(info_tuple):\n (\n threshold,\n threshold99,\n auc,\n f1,\n acc,\n rec,\n f1_99,\n acc_99,\n rec_99,\n r_10,\n r_20,\n r_30,\n r_40,\n r_50,\n r_95,\n reduce_work,\n p_mean,\n n_mean,\n ) = info_tuple\n print(\"Gold auc:\", r4(auc))\n print(\"f1, acc, rec: \", r4(f1), r4(acc), r4(rec))\n print(\"f1_99, acc_99, rec_99: \", r4(f1_99), r4(acc_99), r4(rec_99))\n print(\"top@: \", r4(r_10), r4(r_20), r4(r_30), r4(r_40), r4(r_50), r4(r_95))\n print(\"reduce workload: \", r4(reduce_work))\n print(\"threshold: \", r4(threshold))\n print(\"threshold99: \", r4(threshold99))\n print(\"Gold positive mean:\", r4(p_mean))\n print(\"Gold negative mean:\", r4(n_mean))\n print()"
}
] | import os
import datetime
import argparse
import torch
import numpy as np
import torch.optim as optim
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from torch.nn import BCEWithLogitsLoss
from model import TextClassifier, NonNegativePULoss
from dataset_pubmed import (
make_PU_meta,
BiDataset,
BalancedBatchSampler,
ProportionalSampler,
)
from utils import (
set_seed,
build_vocab,
getFeatures,
get_metric,
log_metrics,
print_info,
) | 4,784 |
parser = argparse.ArgumentParser(description="Run Text Classification Experiments")
parser.add_argument(
"--batch_size", type=int, default=128, help="Batch size for training"
)
parser.add_argument(
"--num_epochs", type=int, default=50, help="Number of training epochs"
)
parser.add_argument(
"--lr", type=float, default=0.001, help="Learning rate for the optimizer"
)
parser.add_argument(
"--prior",
type=float,
default=0.5,
help="Prior probability for Non-Negative PU Loss",
)
parser.add_argument(
"--max_length", type=int, default=800, help="Maximum length of the input sequence"
)
parser.add_argument(
"--embedding_dim",
type=int,
default=50,
help="Embedding dimension for text classifier",
)
parser.add_argument(
"--models_dir", type=str, default="models", help="Directory to save the models"
)
parser.add_argument(
"--seed", type=int, default=42, help="Random seed for reproducibility"
)
args = parser.parse_args()
batch_size = args.batch_size
num_epochs = args.num_epochs
learning_rate = args.lr
prior = args.prior
embedding_dim = args.embedding_dim
models_dir = args.models_dir
set_seed(args.seed)
experiments = [
"data/pubmed-dse/L50/D000328.D008875.D015658",
"data/pubmed-dse/L50/D000818.D001921.D051381",
"data/pubmed-dse/L50/D006435.D007676.D008875",
"data/pubmed-dse/L20/D000328.D008875.D015658",
"data/pubmed-dse/L20/D000818.D001921.D051381",
"data/pubmed-dse/L20/D006435.D007676.D008875",
]
root_dir = experiments[1]
tr_file_path = os.path.join(root_dir, "train.jsonl")
va_file_path = os.path.join(root_dir, "valid.jsonl")
ts_file_path = os.path.join(root_dir, "test.jsonl")
all_df = make_PU_meta(tr_file_path, va_file_path, ts_file_path)
train_index = all_df.query("tr == 1").index
train_labels = all_df.query("tr == 1")["pulabel"].values
val_index = all_df.query("ca == 1").index
val_labels = all_df.query("ca == 1")["label"].values
test_index = all_df.query("ts == 1").index
test_labels = all_df.query("ts == 1")["label"].values
all_df["combined_text"] = all_df["title"] + " " + all_df["abstract"]
all_texts = all_df["combined_text"].tolist()
vocab = build_vocab(all_texts)
word_to_index = {word: index for index, word in enumerate(vocab)}
max_length = args.max_length
all_features = getFeatures(all_df, word_to_index, max_length=max_length)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = TextClassifier(len(vocab), embedding_dim).to(device)
loss_fct = NonNegativePULoss(prior=prior)
# loss_fct = BCEWithLogitsLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
best_va_f1 = 0
best_ts_f1 = 0
writer = SummaryWriter("runs/nnPU_CNN")
|
parser = argparse.ArgumentParser(description="Run Text Classification Experiments")
parser.add_argument(
"--batch_size", type=int, default=128, help="Batch size for training"
)
parser.add_argument(
"--num_epochs", type=int, default=50, help="Number of training epochs"
)
parser.add_argument(
"--lr", type=float, default=0.001, help="Learning rate for the optimizer"
)
parser.add_argument(
"--prior",
type=float,
default=0.5,
help="Prior probability for Non-Negative PU Loss",
)
parser.add_argument(
"--max_length", type=int, default=800, help="Maximum length of the input sequence"
)
parser.add_argument(
"--embedding_dim",
type=int,
default=50,
help="Embedding dimension for text classifier",
)
parser.add_argument(
"--models_dir", type=str, default="models", help="Directory to save the models"
)
parser.add_argument(
"--seed", type=int, default=42, help="Random seed for reproducibility"
)
args = parser.parse_args()
batch_size = args.batch_size
num_epochs = args.num_epochs
learning_rate = args.lr
prior = args.prior
embedding_dim = args.embedding_dim
models_dir = args.models_dir
set_seed(args.seed)
experiments = [
"data/pubmed-dse/L50/D000328.D008875.D015658",
"data/pubmed-dse/L50/D000818.D001921.D051381",
"data/pubmed-dse/L50/D006435.D007676.D008875",
"data/pubmed-dse/L20/D000328.D008875.D015658",
"data/pubmed-dse/L20/D000818.D001921.D051381",
"data/pubmed-dse/L20/D006435.D007676.D008875",
]
root_dir = experiments[1]
tr_file_path = os.path.join(root_dir, "train.jsonl")
va_file_path = os.path.join(root_dir, "valid.jsonl")
ts_file_path = os.path.join(root_dir, "test.jsonl")
all_df = make_PU_meta(tr_file_path, va_file_path, ts_file_path)
train_index = all_df.query("tr == 1").index
train_labels = all_df.query("tr == 1")["pulabel"].values
val_index = all_df.query("ca == 1").index
val_labels = all_df.query("ca == 1")["label"].values
test_index = all_df.query("ts == 1").index
test_labels = all_df.query("ts == 1")["label"].values
all_df["combined_text"] = all_df["title"] + " " + all_df["abstract"]
all_texts = all_df["combined_text"].tolist()
vocab = build_vocab(all_texts)
word_to_index = {word: index for index, word in enumerate(vocab)}
max_length = args.max_length
all_features = getFeatures(all_df, word_to_index, max_length=max_length)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = TextClassifier(len(vocab), embedding_dim).to(device)
loss_fct = NonNegativePULoss(prior=prior)
# loss_fct = BCEWithLogitsLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
best_va_f1 = 0
best_ts_f1 = 0
writer = SummaryWriter("runs/nnPU_CNN") | train_data = BiDataset( | 3 | 2023-10-18 06:15:13+00:00 | 8k |
colour-science/colour-visuals | colour_visuals/diagrams.py | [
{
"identifier": "DEFAULT_FLOAT_DTYPE_WGPU",
"path": "colour_visuals/common.py",
"snippet": "DEFAULT_FLOAT_DTYPE_WGPU = np.float32"
},
{
"identifier": "DEFAULT_INT_DTYPE_WGPU",
"path": "colour_visuals/common.py",
"snippet": "DEFAULT_INT_DTYPE_WGPU = np.uint32"
},
{
"identifier": "XYZ_to_colourspace_model",
"path": "colour_visuals/common.py",
"snippet": "def XYZ_to_colourspace_model(\n XYZ: ArrayLike,\n illuminant: ArrayLike,\n model: LiteralColourspaceModel | str = \"CIE xyY\",\n normalise_model: bool = True,\n **kwargs,\n) -> NDArray:\n \"\"\"\n Convert from *CIE XYZ* tristimulus values to given colourspace model while\n normalising some of the absolute models.\n\n Parameters\n ----------\n XYZ\n *CIE XYZ* tristimulus values to convert to given colourspace model.\n illuminant\n Reference *illuminant* *CIE xy* chromaticity coordinates or *CIE xyY*\n colourspace array.\n model\n Colourspace model, see :attr:`colour.COLOURSPACE_MODELS` attribute for\n the list of supported colourspace models.\n normalise_model\n Whether to normalise colourspace models such as :math:`IC_TC_P` and\n :math:`J_za_zb_z`.\n\n Other Parameters\n ----------------\n kwargs\n See the documentation of the supported conversion definitions.\n\n Returns\n -------\n Any\n Converted *CIE XYZ* tristimulus values.\n \"\"\"\n\n ijk = convert(\n XYZ,\n \"CIE XYZ\",\n model,\n illuminant=illuminant,\n verbose={\"mode\": \"Short\"},\n **kwargs,\n )\n\n if normalise_model:\n if model == \"ICtCp\":\n ijk /= XYZ_to_ICtCp([1, 1, 1])[0]\n elif model == \"JzAzBz\":\n ijk /= XYZ_to_Jzazbz([1, 1, 1])[0]\n\n return ijk"
},
{
"identifier": "append_channel",
"path": "colour_visuals/common.py",
"snippet": "def append_channel(a: ArrayLike, value: float = 1) -> NDArray:\n \"\"\"\n Append a channel to given variable :math:`a`.\n\n Parameters\n ----------\n a\n Variable :math:`a` to append a channel to.\n value\n Channel value.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Variable :math:`a` with appended channel.\n \"\"\"\n\n a = np.copy(a)\n\n return np.hstack( # pyright: ignore\n [\n a,\n full(\n (*list(a.shape[:-1]), 1),\n value,\n dtype=a.dtype, # pyright: ignore\n ),\n ]\n )"
},
{
"identifier": "as_contiguous_array",
"path": "colour_visuals/common.py",
"snippet": "def as_contiguous_array(\n a: NDArray, dtype: Type[DType] = DEFAULT_FLOAT_DTYPE_WGPU\n) -> NDArray:\n \"\"\"\n Convert given array to a contiguous array (ndim >= 1) in memory (C order).\n\n Parameters\n ----------\n a\n Variable :math:`a` to convert.\n dtype\n :class:`numpy.dtype` to use for conversion, default to the\n :class:`numpy.dtype` defined by the\n :attr:`colour.constant.DEFAULT_FLOAT_DTYPE_WGPU` attribute.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Converted variable :math:`a`.\n \"\"\"\n\n return np.ascontiguousarray(a.astype(dtype))"
},
{
"identifier": "MixinPropertyCMFS",
"path": "colour_visuals/visual.py",
"snippet": "class MixinPropertyCMFS:\n \"\"\"\n Define a mixin for a standard observer colour matching functions,\n default to the *CIE 1931 2 Degree Standard Observer*.\n\n Attributes\n ----------\n - :attr:`~colour_visuals.visual.MixinPropertyCMFS.cmfs`\n \"\"\"\n\n def __init__(self):\n self._cmfs = MSDS_CMFS[\"CIE 1931 2 Degree Standard Observer\"]\n\n super().__init__()\n\n @visual_property\n def cmfs(\n self,\n ) -> (\n MultiSpectralDistributions\n | str\n | Sequence[MultiSpectralDistributions | str]\n ):\n \"\"\"\n Getter and setter property for the standard observer colour matching\n functions.\n\n Parameters\n ----------\n value\n Value to set the standard observer colour matching functions with.\n\n Returns\n -------\n :class:`colour.MultiSpectralDistributions` or :class:`str` or \\\n:class:`Sequence`\n Standard observer colour matching functions.\n \"\"\"\n\n return self._cmfs\n\n @cmfs.setter\n def cmfs(\n self,\n value: MultiSpectralDistributions\n | str\n | Sequence[MultiSpectralDistributions | str],\n ):\n \"\"\"Setter for the **self.cmfs** property.\"\"\"\n\n self._cmfs = cast(\n MultiSpectralDistributions,\n first_item(filter_cmfs(value).values()),\n )"
},
{
"identifier": "MixinPropertyColour",
"path": "colour_visuals/visual.py",
"snippet": "class MixinPropertyColour:\n \"\"\"\n Define a mixin for a colour.\n\n Attributes\n ----------\n - :attr:`~colour_visuals.visual.MixinPropertyColour.colour`\n \"\"\"\n\n def __init__(self):\n self._colour = None\n\n super().__init__()\n\n @visual_property\n def colour(self) -> ArrayLike | None:\n \"\"\"\n Getter and setter property for the colour.\n\n Parameters\n ----------\n value\n Value to set the colour with.\n\n Returns\n -------\n ArrayLike or None\n Visual colour.\n \"\"\"\n\n return self._colour\n\n @colour.setter\n def colour(self, value: ArrayLike | None):\n \"\"\"Setter for the **self.colour** property.\"\"\"\n\n self._colour = value"
},
{
"identifier": "MixinPropertyKwargs",
"path": "colour_visuals/visual.py",
"snippet": "class MixinPropertyKwargs:\n \"\"\"\n Define a mixin for keyword arguments.\n\n Attributes\n ----------\n - :attr:`~colour_visuals.visual.MixinPropertyKwargs.kwargs`\n \"\"\"\n\n def __init__(self):\n self._kwargs = {}\n\n super().__init__()\n\n @visual_property\n def kwargs(self) -> dict:\n \"\"\"\n Getter and setter property for the keyword arguments.\n\n Parameters\n ----------\n value\n Value to set keyword arguments with.\n\n Returns\n -------\n :class:`dict`\n Keyword arguments.\n \"\"\"\n\n return self._kwargs\n\n @kwargs.setter\n def kwargs(self, value: dict):\n \"\"\"Setter for the **self.kwargs** property.\"\"\"\n\n self._kwargs = value"
},
{
"identifier": "MixinPropertyMethod",
"path": "colour_visuals/visual.py",
"snippet": "class MixinPropertyMethod:\n \"\"\"\n Define a mixin for a *Chromaticity Diagram* method.\n\n Attributes\n ----------\n - :attr:`~colour_visuals.visual.MixinPropertyMethod.method`\n \"\"\"\n\n def __init__(self):\n self._method = \"CIE 1931\"\n\n super().__init__()\n\n @visual_property\n def method(\n self,\n ) -> Literal[\"CIE 1931\", \"CIE 1960 UCS\", \"CIE 1976 UCS\"] | str:\n \"\"\"\n Getter and setter property for the *Chromaticity Diagram* method.\n\n Parameters\n ----------\n value\n Value to set the *Chromaticity Diagram* method with.\n\n Returns\n -------\n :class:`str`\n *Chromaticity Diagram* method.\n \"\"\"\n\n return self._method\n\n @method.setter\n def method(\n self, value: Literal[\"CIE 1931\", \"CIE 1960 UCS\", \"CIE 1976 UCS\"] | str\n ):\n \"\"\"Setter for the **self.method** property.\"\"\"\n\n self._method = validate_method(\n value, tuple(METHODS_CHROMATICITY_DIAGRAM)\n )"
},
{
"identifier": "MixinPropertyModel",
"path": "colour_visuals/visual.py",
"snippet": "class MixinPropertyModel:\n \"\"\"\n Define a mixin for a colourspace model.\n\n Attributes\n ----------\n - :attr:`~colour_visuals.visual.MixinPropertyModel.model`\n \"\"\"\n\n def __init__(self):\n self._model = \"CIE xyY\"\n\n super().__init__()\n\n @visual_property\n def model(self) -> LiteralColourspaceModel | str:\n \"\"\"\n Getter and setter property for the colourspace model.\n\n Parameters\n ----------\n value\n Value to set the colourspace model with.\n\n Returns\n -------\n :class:`str`\n Colourspace model.\n \"\"\"\n\n return self._model\n\n @model.setter\n def model(self, value: LiteralColourspaceModel | str):\n \"\"\"Setter for the **self.model** property.\"\"\"\n\n self._model = validate_method(value, tuple(COLOURSPACE_MODELS))"
},
{
"identifier": "MixinPropertyOpacity",
"path": "colour_visuals/visual.py",
"snippet": "class MixinPropertyOpacity:\n \"\"\"\n Define a mixin for an opacity value.\n\n Attributes\n ----------\n - :attr:`~colour_visuals.visual.MixinPropertyOpacity.opacity`\n \"\"\"\n\n def __init__(self):\n self._opacity = 1\n\n super().__init__()\n\n @visual_property\n def opacity(self) -> float:\n \"\"\"\n Getter and setter property for the opacity value.\n\n Parameters\n ----------\n value\n Value to set the opacity value with.\n\n Returns\n -------\n :class:`float`\n Visual opacity.\n \"\"\"\n\n return self._opacity\n\n @opacity.setter\n def opacity(self, value: float):\n \"\"\"Setter for the **self.opacity** property.\"\"\"\n\n self._opacity = value"
},
{
"identifier": "MixinPropertySamples",
"path": "colour_visuals/visual.py",
"snippet": "class MixinPropertySamples:\n \"\"\"\n Define a mixin for a sample count.\n\n Attributes\n ----------\n - :attr:`~colour_visuals.visual.MixinPropertySamples.samples`\n \"\"\"\n\n def __init__(self):\n self._samples = 1\n\n super().__init__()\n\n @visual_property\n def samples(self) -> int:\n \"\"\"\n Getter and setter property for the sample count.\n\n Parameters\n ----------\n value\n Value to set sample count with.\n\n Returns\n -------\n :class:`int`\n Sample count.\n \"\"\"\n\n return self._samples\n\n @samples.setter\n def samples(self, value: int):\n \"\"\"Setter for the **self.samples** property.\"\"\"\n\n self._samples = value"
},
{
"identifier": "MixinPropertyThickness",
"path": "colour_visuals/visual.py",
"snippet": "class MixinPropertyThickness:\n \"\"\"\n Define a mixin for a thickness value.\n\n Attributes\n ----------\n - :attr:`~colour_visuals.visual.MixinPropertyThickness.thickness`\n \"\"\"\n\n def __init__(self):\n self._thickness = 1\n\n super().__init__()\n\n @visual_property\n def thickness(self) -> float:\n \"\"\"\n Getter and setter property for the thickness value.\n\n Parameters\n ----------\n value\n Value to set the thickness value with.\n\n Returns\n -------\n :class:`float`\n Thickness value.\n \"\"\"\n\n return self._thickness\n\n @thickness.setter\n def thickness(self, value: float):\n \"\"\"Setter for the **self.thickness** property.\"\"\"\n\n self._thickness = value"
},
{
"identifier": "MixinPropertyTypeMaterial",
"path": "colour_visuals/visual.py",
"snippet": "class MixinPropertyTypeMaterial:\n \"\"\"\n Define a mixin for a material type.\n\n Attributes\n ----------\n - :attr:`~colour_visuals.visual.MixinPropertyTypeMaterial.type_material`\n \"\"\"\n\n def __init__(self):\n self._type_material = gfx.MeshBasicMaterial\n\n super().__init__()\n\n @visual_property\n def type_material(\n self,\n ) -> Type[gfx.MeshAbstractMaterial]:\n \"\"\"\n Getter and setter property for the material type.\n\n Parameters\n ----------\n value\n Value to set the material type with.\n\n Returns\n -------\n :class:`gfx.MeshAbstractMaterial`\n Material type.\n \"\"\"\n\n return self._type_material\n\n @type_material.setter\n def type_material(self, value: Type[gfx.MeshAbstractMaterial]):\n \"\"\"Setter for the **self.material** property.\"\"\"\n\n self._type_material = value"
},
{
"identifier": "MixinPropertyWireframe",
"path": "colour_visuals/visual.py",
"snippet": "class MixinPropertyWireframe:\n \"\"\"\n Define a mixin for a wireframe state.\n\n Attributes\n ----------\n - :attr:`~colour_visuals.visual.MixinPropertyWireframe.wireframe`\n \"\"\"\n\n def __init__(self):\n self._wireframe = False\n\n super().__init__()\n\n @visual_property\n def wireframe(self) -> bool:\n \"\"\"\n Getter and setter property for the wireframe state.\n\n Parameters\n ----------\n value\n Value to set wireframe state with.\n\n Returns\n -------\n :class:`bool`\n Wireframe state.\n \"\"\"\n\n return self._wireframe\n\n @wireframe.setter\n def wireframe(self, value: bool):\n \"\"\"Setter for the **self.wireframe** property.\"\"\"\n\n self._wireframe = value"
},
{
"identifier": "Visual",
"path": "colour_visuals/visual.py",
"snippet": "class Visual(gfx.Group, metaclass=ABCMeta):\n \"\"\"Define the base class for the visuals.\"\"\"\n\n def __init__(self):\n self._is_update_blocked = False\n\n super().__init__()\n\n @contextmanager\n def block_update(self) -> Generator:\n \"\"\"Define a context manager that blocks the visual updates.\"\"\"\n self._is_update_blocked = True\n\n yield\n\n self._is_update_blocked = False\n\n @abstractmethod\n def update(self):\n \"\"\"\n Update the visual.\n\n Notes\n -----\n - Must be reimplemented by sub-classes.\n \"\"\""
},
{
"identifier": "visual_property",
"path": "colour_visuals/visual.py",
"snippet": "class visual_property(property):\n \"\"\"\n Define a :class:`property` sub-class calling the\n :class:`colour_visuals.Visual.update` method.\n \"\"\"\n\n def __set__(self, obj, value):\n \"\"\"Reimplement the :class:`property.__set__` method.\"\"\"\n super().__set__(obj, value)\n\n obj.update()"
}
] | import numpy as np
import pygfx as gfx
from colour.algebra import euclidean_distance, normalise_maximum
from colour.colorimetry import MultiSpectralDistributions
from colour.hints import (
ArrayLike,
Literal,
LiteralColourspaceModel,
Sequence,
Type,
cast,
)
from colour.models import XYZ_to_RGB
from colour.plotting import (
CONSTANTS_COLOUR_STYLE,
LABELS_CHROMATICITY_DIAGRAM_DEFAULT,
METHODS_CHROMATICITY_DIAGRAM,
XYZ_to_plotting_colourspace,
colourspace_model_axis_reorder,
)
from colour.plotting.diagrams import lines_spectral_locus
from colour.utilities import (
full,
optional,
tstack,
)
from scipy.spatial import Delaunay
from colour_visuals.common import (
DEFAULT_FLOAT_DTYPE_WGPU,
DEFAULT_INT_DTYPE_WGPU,
XYZ_to_colourspace_model,
append_channel,
as_contiguous_array,
)
from colour_visuals.visual import (
MixinPropertyCMFS,
MixinPropertyColour,
MixinPropertyKwargs,
MixinPropertyMethod,
MixinPropertyModel,
MixinPropertyOpacity,
MixinPropertySamples,
MixinPropertyThickness,
MixinPropertyTypeMaterial,
MixinPropertyWireframe,
Visual,
visual_property,
) | 5,938 |
Attributes
----------
- :attr:`~colour_visuals.VisualSpectralLocus3D.cmfs`
- :attr:`~colour_visuals.VisualSpectralLocus3D.model`
- :attr:`~colour_visuals.VisualSpectralLocus3D.labels`
- :attr:`~colour_visuals.VisualSpectralLocus3D.colour`
- :attr:`~colour_visuals.VisualSpectralLocus3D.opacity`
- :attr:`~colour_visuals.VisualSpectralLocus3D.thickness`
Methods
-------
- :meth:`~colour_visuals.VisualSpectralLocus3D.__init__`
- :meth:`~colour_visuals.VisualSpectralLocus3D.update`
Examples
--------
>>> import os
>>> from colour.utilities import suppress_stdout
>>> from wgpu.gui.auto import WgpuCanvas
>>> with suppress_stdout():
... canvas = WgpuCanvas(size=(960, 540))
... scene = gfx.Scene()
... scene.add(
... gfx.Background(
... None, gfx.BackgroundMaterial(np.array([0.18, 0.18, 0.18]))
... )
... )
... visual = VisualSpectralLocus3D(model="CIE XYZ")
... camera = gfx.PerspectiveCamera(50, 16 / 9)
... camera.show_object(visual, up=np.array([0, 0, 1]), scale=1.25)
... scene.add(visual)
... if os.environ.get("CI") is None:
... gfx.show(scene, camera=camera, canvas=canvas)
...
.. image:: ../_static/Plotting_VisualSpectralLocus3D.png
:align: center
:alt: visual-spectral-locus-3d
"""
def __init__(
self,
cmfs: MultiSpectralDistributions
| str
| Sequence[
MultiSpectralDistributions | str
] = "CIE 1931 2 Degree Standard Observer",
model: LiteralColourspaceModel | str = "CIE xyY",
colour: ArrayLike | None = None,
opacity: float = 1,
thickness: float = 1,
**kwargs,
):
super().__init__()
self._spectral_locus = None
with self.block_update():
self.cmfs = cmfs
self.model = model
self.colour = colour
self.opacity = opacity
self.thickness = thickness
self.kwargs = kwargs
self.update()
def update(self):
"""Update the visual."""
if self._is_update_blocked:
return
self.clear()
colourspace = CONSTANTS_COLOUR_STYLE.colour.colourspace
positions = colourspace_model_axis_reorder(
XYZ_to_colourspace_model(
self._cmfs.values,
colourspace.whitepoint,
self._model,
**self._kwargs,
),
self._model,
)
positions = np.concatenate(
[positions[:-1], positions[1:]], axis=1
).reshape([-1, 3])
if self._colour is None:
colour = XYZ_to_RGB(self._cmfs.values, colourspace)
colour = np.concatenate([colour[:-1], colour[1:]], axis=1).reshape(
[-1, 3]
)
else:
colour = np.tile(self._colour, (positions.shape[0], 1))
self._spectral_locus = gfx.Line(
gfx.Geometry(
positions=as_contiguous_array(positions),
colors=as_contiguous_array(
append_channel(colour, self._opacity)
),
),
gfx.LineSegmentMaterial(
thickness=self._thickness, color_mode="vertex"
),
)
self.add(self._spectral_locus)
class VisualChromaticityDiagram(
MixinPropertyCMFS,
MixinPropertyColour,
MixinPropertyTypeMaterial,
MixinPropertyMethod,
MixinPropertyOpacity,
MixinPropertySamples,
| # !/usr/bin/env python
"""
Chromaticity Diagram Visuals
============================
Defines the *Chromaticity Diagram* visuals:
- :class:`colour_visuals.VisualSpectralLocus2D`
- :class:`colour_visuals.VisualSpectralLocus3D`
- :class:`colour_visuals.VisualChromaticityDiagram`
- :class:`colour_visuals.VisualChromaticityDiagramCIE1931`
- :class:`colour_visuals.VisualChromaticityDiagramCIE1960UCS`
- :class:`colour_visuals.VisualChromaticityDiagramCIE1976UCS`
"""
from __future__ import annotations
__author__ = "Colour Developers"
__copyright__ = "Copyright 2023 Colour Developers"
__license__ = "BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "[email protected]"
__status__ = "Production"
__all__ = [
"VisualSpectralLocus2D",
"VisualSpectralLocus3D",
"VisualChromaticityDiagram",
"MixinPropertyKwargsVisualSpectralLocus",
"MixinPropertyKwargsVisualChromaticityDiagram",
"VisualChromaticityDiagramCIE1931",
"VisualChromaticityDiagramCIE1960UCS",
"VisualChromaticityDiagramCIE1976UCS",
]
class VisualSpectralLocus2D(
MixinPropertyCMFS,
MixinPropertyColour,
MixinPropertyMethod,
MixinPropertyOpacity,
MixinPropertyThickness,
Visual,
):
"""
Create a 2D *Spectral Locus* visual.
Parameters
----------
cmfs
Standard observer colour matching functions used for computing the
spectrum domain and colours. ``cmfs`` can be of any type or form
supported by the :func:`colour.plotting.common.filter_cmfs` definition.
method
*Chromaticity Diagram* method.
labels
Array of wavelength labels used to customise which labels will be drawn
around the spectral locus. Passing an empty array will result in no
wavelength labels being drawn.
colour
Colour of the visual, if *None*, the colour is computed from the visual
geometry.
opacity
Opacity of the visual.
thickness
Thickness of the visual lines.
Attributes
----------
- :attr:`~colour_visuals.VisualSpectralLocus2D.cmfs`
- :attr:`~colour_visuals.VisualSpectralLocus2D.method`
- :attr:`~colour_visuals.VisualSpectralLocus2D.labels`
- :attr:`~colour_visuals.VisualSpectralLocus2D.colour`
- :attr:`~colour_visuals.VisualSpectralLocus2D.opacity`
- :attr:`~colour_visuals.VisualSpectralLocus2D.thickness`
Methods
-------
- :meth:`~colour_visuals.VisualSpectralLocus2D.__init__`
- :meth:`~colour_visuals.VisualSpectralLocus2D.update`
Examples
--------
>>> import os
>>> from colour.utilities import suppress_stdout
>>> from wgpu.gui.auto import WgpuCanvas
>>> with suppress_stdout():
... canvas = WgpuCanvas(size=(960, 540))
... scene = gfx.Scene()
... scene.add(
... gfx.Background(
... None, gfx.BackgroundMaterial(np.array([0.18, 0.18, 0.18]))
... )
... )
... visual = VisualSpectralLocus2D()
... camera = gfx.PerspectiveCamera(50, 16 / 9)
... camera.show_object(visual, up=np.array([0, 0, 1]), scale=1.25)
... scene.add(visual)
... if os.environ.get("CI") is None:
... gfx.show(scene, camera=camera, canvas=canvas)
...
.. image:: ../_static/Plotting_VisualSpectralLocus2D.png
:align: center
:alt: visual-spectral-locus-2d
"""
def __init__(
self,
cmfs: MultiSpectralDistributions
| str
| Sequence[
MultiSpectralDistributions | str
] = "CIE 1931 2 Degree Standard Observer",
method: Literal["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"]
| str = "CIE 1931",
labels: Sequence | None = None,
colour: ArrayLike | None = None,
opacity: float = 1,
thickness: float = 1,
):
super().__init__()
self._spectral_locus = None
self._wavelengths = None
self._texts = None
self._points = None
self._labels = None
with self.block_update():
self.cmfs = cmfs
self.method = method
self.labels = labels
self.colour = colour
self.opacity = opacity
self.thickness = thickness
self.update()
@visual_property
def labels(
self,
) -> Sequence | None:
"""
Getter and setter property for the labels.
Parameters
----------
value
Value to set the labels with.
Returns
-------
:class:`str`
Labels.
"""
return self._labels
@labels.setter
def labels(self, value: Sequence | None):
"""Setter for the **self.labels** property."""
self._labels = cast(
Sequence,
optional(value, LABELS_CHROMATICITY_DIAGRAM_DEFAULT[self._method]),
)
def update(self):
"""Update the visual."""
if self._is_update_blocked:
return
self.clear()
lines_sl, lines_w = lines_spectral_locus(
self._cmfs, self._labels, self._method
)
# Spectral Locus
positions = np.concatenate(
[lines_sl["position"][:-1], lines_sl["position"][1:]], axis=1
).reshape([-1, 2])
positions = np.hstack(
[
positions,
np.full((positions.shape[0], 1), 0, DEFAULT_FLOAT_DTYPE_WGPU),
]
)
if self._colour is None:
colour_sl = np.concatenate(
[lines_sl["colour"][:-1], lines_sl["colour"][1:]], axis=1
).reshape([-1, 3])
else:
colour_sl = np.tile(self._colour, (positions.shape[0], 1))
self._spectral_locus = gfx.Line(
gfx.Geometry(
positions=as_contiguous_array(positions),
colors=as_contiguous_array(
append_channel(colour_sl, self._opacity)
),
),
gfx.LineSegmentMaterial(
thickness=self._thickness, color_mode="vertex"
),
)
self.add(self._spectral_locus)
if not self._labels:
return
# Wavelengths
positions = lines_w["position"]
positions = np.hstack(
[
positions,
np.full((positions.shape[0], 1), 0, DEFAULT_FLOAT_DTYPE_WGPU),
]
)
if self._colour is None:
colour_w = lines_w["colour"]
else:
colour_w = np.tile(self._colour, (positions.shape[0], 1))
self._wavelengths = gfx.Line(
gfx.Geometry(
positions=as_contiguous_array(positions),
colors=as_contiguous_array(
append_channel(colour_w, self._opacity)
),
),
gfx.LineSegmentMaterial(
thickness=self._thickness, color_mode="vertex"
),
)
self.add(self._wavelengths)
# Labels
self._texts = []
for i, label in enumerate(
[
label
for label in self._labels
if label in self._cmfs.wavelengths
]
):
positions = lines_w["position"][::2]
normals = lines_w["normal"][::2]
text = gfx.Text(
gfx.TextGeometry(
str(label),
font_size=CONSTANTS_COLOUR_STYLE.font.size,
screen_space=True,
anchor="Center-Left"
if lines_w["normal"][::2][i, 0] >= 0
else "Center-Right",
),
gfx.TextMaterial(color=CONSTANTS_COLOUR_STYLE.colour.light),
)
text.local.position = np.array(
[
positions[i, 0] + normals[i, 0] / 50 * 1.25,
positions[i, 1] + normals[i, 1] / 50 * 1.25,
0,
]
)
self._texts.append(text)
self.add(text)
positions = np.hstack(
[
lines_w["position"][::2],
np.full(
(lines_w["position"][::2].shape[0], 1),
0,
DEFAULT_FLOAT_DTYPE_WGPU,
),
]
)
if self._colour is None:
colour_lp = lines_w["colour"][::2]
else:
colour_lp = np.tile(self._colour, (positions.shape[0], 1))
self._points = gfx.Points(
gfx.Geometry(
positions=as_contiguous_array(positions),
sizes=as_contiguous_array(
full(
lines_w["position"][::2].shape[0], self._thickness * 3
)
),
colors=as_contiguous_array(
append_channel(colour_lp, self._opacity)
),
),
gfx.PointsMaterial(color_mode="vertex", vertex_sizes=True),
)
self.add(self._points)
class VisualSpectralLocus3D(
MixinPropertyCMFS,
MixinPropertyColour,
MixinPropertyKwargs,
MixinPropertyModel,
MixinPropertyOpacity,
MixinPropertyThickness,
Visual,
):
"""
Create a 3D *Spectral Locus* visual.
Parameters
----------
cmfs
Standard observer colour matching functions used for computing the
spectrum domain and colours. ``cmfs`` can be of any type or form
supported by the :func:`colour.plotting.common.filter_cmfs` definition.
model
Colourspace model, see :attr:`colour.COLOURSPACE_MODELS` attribute for
the list of supported colourspace models.
labels
Array of wavelength labels used to customise which labels will be drawn
around the spectral locus. Passing an empty array will result in no
wavelength labels being drawn.
colour
Colour of the visual, if *None*, the colour is computed from the visual
geometry.
opacity
Opacity of the visual.
thickness
Thickness of the visual lines.
Other Parameters
----------------
kwargs
See the documentation of the supported conversion definitions.
Attributes
----------
- :attr:`~colour_visuals.VisualSpectralLocus3D.cmfs`
- :attr:`~colour_visuals.VisualSpectralLocus3D.model`
- :attr:`~colour_visuals.VisualSpectralLocus3D.labels`
- :attr:`~colour_visuals.VisualSpectralLocus3D.colour`
- :attr:`~colour_visuals.VisualSpectralLocus3D.opacity`
- :attr:`~colour_visuals.VisualSpectralLocus3D.thickness`
Methods
-------
- :meth:`~colour_visuals.VisualSpectralLocus3D.__init__`
- :meth:`~colour_visuals.VisualSpectralLocus3D.update`
Examples
--------
>>> import os
>>> from colour.utilities import suppress_stdout
>>> from wgpu.gui.auto import WgpuCanvas
>>> with suppress_stdout():
... canvas = WgpuCanvas(size=(960, 540))
... scene = gfx.Scene()
... scene.add(
... gfx.Background(
... None, gfx.BackgroundMaterial(np.array([0.18, 0.18, 0.18]))
... )
... )
... visual = VisualSpectralLocus3D(model="CIE XYZ")
... camera = gfx.PerspectiveCamera(50, 16 / 9)
... camera.show_object(visual, up=np.array([0, 0, 1]), scale=1.25)
... scene.add(visual)
... if os.environ.get("CI") is None:
... gfx.show(scene, camera=camera, canvas=canvas)
...
.. image:: ../_static/Plotting_VisualSpectralLocus3D.png
:align: center
:alt: visual-spectral-locus-3d
"""
def __init__(
self,
cmfs: MultiSpectralDistributions
| str
| Sequence[
MultiSpectralDistributions | str
] = "CIE 1931 2 Degree Standard Observer",
model: LiteralColourspaceModel | str = "CIE xyY",
colour: ArrayLike | None = None,
opacity: float = 1,
thickness: float = 1,
**kwargs,
):
super().__init__()
self._spectral_locus = None
with self.block_update():
self.cmfs = cmfs
self.model = model
self.colour = colour
self.opacity = opacity
self.thickness = thickness
self.kwargs = kwargs
self.update()
def update(self):
"""Update the visual."""
if self._is_update_blocked:
return
self.clear()
colourspace = CONSTANTS_COLOUR_STYLE.colour.colourspace
positions = colourspace_model_axis_reorder(
XYZ_to_colourspace_model(
self._cmfs.values,
colourspace.whitepoint,
self._model,
**self._kwargs,
),
self._model,
)
positions = np.concatenate(
[positions[:-1], positions[1:]], axis=1
).reshape([-1, 3])
if self._colour is None:
colour = XYZ_to_RGB(self._cmfs.values, colourspace)
colour = np.concatenate([colour[:-1], colour[1:]], axis=1).reshape(
[-1, 3]
)
else:
colour = np.tile(self._colour, (positions.shape[0], 1))
self._spectral_locus = gfx.Line(
gfx.Geometry(
positions=as_contiguous_array(positions),
colors=as_contiguous_array(
append_channel(colour, self._opacity)
),
),
gfx.LineSegmentMaterial(
thickness=self._thickness, color_mode="vertex"
),
)
self.add(self._spectral_locus)
class VisualChromaticityDiagram(
MixinPropertyCMFS,
MixinPropertyColour,
MixinPropertyTypeMaterial,
MixinPropertyMethod,
MixinPropertyOpacity,
MixinPropertySamples, | MixinPropertyWireframe, | 14 | 2023-10-15 04:30:47+00:00 | 8k |
JiahuiLei/NAP | eval/save_viz_utils.py | [
{
"identifier": "get_G_from_VE",
"path": "object_utils/arti_graph_utils_v3.py",
"snippet": "def get_G_from_VE(V, E):\n # v: [mask_occ(1), bbox(3), r_gl(3), t_gl(3) | additional codes in the future]\n # e: [type(3), plucker(6), rlim(2), plim(2)]\n # ! warning, here occ v mask must after sigmoid;\n if isinstance(V, torch.Tensor):\n V = V.cpu().numpy()\n if isinstance(E, torch.Tensor):\n E = E.cpu().numpy()\n v_mask = V[:, 0] > 0.5\n K = len(v_mask)\n assert len(E) == int(K * (K - 1) / 2), f\"len(E)={len(E)}, K={K}\"\n v = V[v_mask]\n n_v = len(v)\n # assert n_v >= 2, f\"n_v={n_v}\"\n original_vid = np.arange(K)[v_mask].tolist()\n G = nx.Graph()\n if n_v >= 2:\n node_color_list = cm.hsv(np.linspace(0, 1, n_v + 1))[:-1]\n # Fill in VTX\n for vid in range(n_v):\n G.add_node(vid)\n _r = torch.as_tensor(v[vid][4:7])\n R = axis_angle_to_matrix(_r).cpu().numpy()\n v_attr = {\n \"vid\": vid,\n \"bbox\": v[vid][1:4],\n \"R\": R.copy(),\n \"t\": v[vid][7:10].copy(),\n \"color\": node_color_list[vid],\n }\n if v.shape[1] > 10:\n v_attr[\"additional\"] = v[vid][10:]\n nx.set_node_attributes(G, {vid: v_attr})\n # Fill in EDGE\n for _i in range(K):\n for _j in range(K):\n if _i >= _j:\n continue\n # src = i, dst = j\n ind = map_upper_triangle_to_list(_i, _j, K)\n # e: [type(3), plucker(6), rlim(2), plim(2)]\n e_type = E[ind, :3].argmax()\n if e_type == 0:\n continue\n assert _i in original_vid and _j in original_vid, \"invalid edge detected!\"\n src_i, dst_j = original_vid.index(_i), original_vid.index(_j)\n e_plucker = E[ind, 3:9]\n if e_type == 2: # flip\n e_plucker = -e_plucker\n e_rlim, e_plim = E[ind, 9:11], E[ind, 11:13]\n T_gi, T_gj = np.eye(4), np.eye(4)\n T_gi[:3, :3] = G.nodes[src_i][\"R\"]\n T_gi[:3, 3] = G.nodes[src_i][\"t\"]\n T_gj[:3, :3] = G.nodes[dst_j][\"R\"]\n T_gj[:3, 3] = G.nodes[dst_j][\"t\"]\n T_ig = np.linalg.inv(T_gi).copy()\n T_ij = T_ig.copy() @ T_gj.copy() # T0\n local_plucker = e_plucker.copy()\n li = T_ig[:3, :3] @ local_plucker[:3]\n mi = T_ig[:3, :3] @ local_plucker[3:] + np.cross(T_ig[:3, 3], li)\n local_plucker = np.concatenate([li, mi])\n G.add_edge(src_i, dst_j)\n nx.set_edge_attributes(\n G,\n {\n (src_i, dst_j): {\n \"src\": src_i,\n \"dst\": dst_j,\n \"T_src_dst\": T_ij.copy(),\n \"plucker\": local_plucker.copy(),\n \"plim\": e_plim.copy(),\n \"rlim\": e_rlim.copy(),\n # additional info\n \"global_plucker\": e_plucker.copy(), # this along with t0, R0 may be used for compute parameter space distance for evaluation\n \"T_ig\": T_ig.copy(),\n \"T_gj\": T_gj.copy(),\n }\n },\n )\n return G"
},
{
"identifier": "append_mesh_to_G",
"path": "object_utils/arti_viz_utils.py",
"snippet": "def append_mesh_to_G(G, mesh_list, key=\"mesh\"):\n for v, v_data in G.nodes(data=True):\n if mesh_list[v] is None:\n continue\n nx.set_node_attributes(G, {v: {key: mesh_list[v]}})\n return G"
},
{
"identifier": "viz_G_topology",
"path": "object_utils/arti_viz_utils.py",
"snippet": "def viz_G_topology(\n G,\n r_edge_color=\"tab:red\",\n p_edge_color=\"tab:blue\",\n hybrid_edge_color=\"tab:orange\",\n node_size=800,\n title=\"\",\n fig_size=(3, 3),\n dpi=100,\n r_range_th=3e-3,\n p_range_th=3e-3,\n show_border=True,\n):\n fig = plt.figure(figsize=fig_size, dpi=dpi)\n pos = nx.kamada_kawai_layout(G)\n options = {\"edgecolors\": \"tab:gray\", \"node_size\": node_size, \"alpha\": 0.9}\n for n, n_data in G.nodes(data=True):\n nx.draw_networkx_nodes(G, pos, nodelist=[n], node_color=G.nodes[n][\"color\"], **options)\n nx.draw_networkx_edges(G, pos, width=2.0, alpha=1.0)\n for e0, e1, e_data in G.edges(data=True):\n r_lim, p_lim = e_data[\"rlim\"], e_data[\"plim\"]\n r_range = abs(r_lim[1] - r_lim[0])\n p_range = abs(p_lim[1] - p_lim[0])\n if r_range > r_range_th and p_range > p_range_th:\n edge_color = hybrid_edge_color\n elif r_range > r_range_th:\n edge_color = r_edge_color\n else:\n edge_color = p_edge_color\n nx.draw_networkx_edges(\n G, pos, edgelist=[(e0, e1)], width=8, alpha=0.7, edge_color=edge_color\n )\n\n plt.title(f\"{title}|V|={len(G.nodes)},|E|={len(G.edges)}\")\n if not show_border:\n plt.axis(\"off\")\n ax = plt.gca()\n fig.tight_layout(pad=1.0)\n rgb = plot_to_image(fig)\n plt.close(fig)\n return rgb"
},
{
"identifier": "viz_G",
"path": "object_utils/arti_viz_utils.py",
"snippet": "def viz_G(\n G,\n bbox_thickness=0.03,\n bbox_alpha=0.6,\n viz_frame_N=16,\n cam_dist=4.0,\n pitch=np.pi / 4.0,\n yaw=np.pi / 4.0,\n shape=(480, 480),\n light_intensity=1.0,\n light_vertical_angle=-np.pi / 3.0,\n cat_dim=1,\n moving_mask=None,\n mesh_key=\"mesh\",\n viz_box=True,\n render_flags=0,\n):\n ret = []\n if len(G.nodes) >= 2 and nx.is_tree(G): # now only support tree viz\n # * now G is connected and acyclic\n # find the root\n vid = [n for n in G.nodes]\n v_bbox = np.stack([d[\"bbox\"] for nid, d in G.nodes(data=True)], 0)\n v_volume = v_bbox.prod(axis=-1) * 8\n root_vid = vid[v_volume.argmax()]\n\n # * sample a set of possible angle range for each joint\n for step in range(viz_frame_N):\n node_traverse_list = [n for n in nx.dfs_preorder_nodes(G, root_vid)]\n T_rl_list = [np.eye(4)] # p_root = T_rl @ p_link\n # * prepare the node pos\n for i in range(len(node_traverse_list) - 1):\n # ! find the parent!\n cid = node_traverse_list[i + 1]\n\n for e, e_data in G.edges.items():\n if cid in e:\n # determine the direction by ensure the other end is a predessor in the traversal list\n other_end = e[0] if e[1] == cid else e[1]\n if node_traverse_list.index(other_end) > i:\n continue\n else:\n pid = other_end\n\n # T1: e_T_src_j1, T2: e_T_j2_dst\n e_data = G.edges[e]\n _T0 = e_data[\"T_src_dst\"]\n plucker = e_data[\"plucker\"]\n l, m = plucker[:3], plucker[3:]\n plim, rlim = e_data[\"plim\"], e_data[\"rlim\"]\n if moving_mask is None or moving_mask[e]:\n theta = np.linspace(*rlim, viz_frame_N)[step]\n d = np.linspace(*plim, viz_frame_N)[step]\n else: # don't move\n theta = np.linspace(*rlim, viz_frame_N)[0]\n d = np.linspace(*plim, viz_frame_N)[0]\n _T1 = screw_to_T(theta, d, l, m)\n T_src_dst = _T1 @ _T0\n if pid == e_data[\"src\"]: # parent is src\n T_parent_child = T_src_dst\n else: # parent is dst\n T_parent_child = np.linalg.inv(T_src_dst)\n # T_parent_child = T_src_dst\n T_root_child = T_rl_list[node_traverse_list.index(pid)] @ T_parent_child\n T_rl_list.append(T_root_child)\n break\n assert len(T_rl_list) == len(node_traverse_list)\n\n # * prepare the bbox\n bbox_edge_start_list, bbox_edge_dir_list = [], []\n bbox_corner_list, bbox_color_list = [], []\n pcl_color_list = []\n # bbox_colors = cm.hsv(np.linspace(0, 1, len(node_traverse_list) + 1))[:-1]\n mesh_list, mesh_color_list = [], []\n for nid, T in zip(node_traverse_list, T_rl_list):\n bbox = G.nodes[nid][\"bbox\"]\n color = G.nodes[nid][\"color\"]\n pcl_color_list.append(color)\n\n if mesh_key in G.nodes[nid].keys():\n mesh = G.nodes[nid][mesh_key].copy()\n mesh.apply_transform(T.copy())\n mesh_list.append(mesh)\n mesh_color_list.append([c * 0.5 for c in color[:3]] + [0.7])\n bbox_corner = BBOX_CORNER * bbox\n bbox_corner = bbox_corner\n bbox_corner = bbox_corner @ T[:3, :3].T + T[:3, 3]\n bbox_corner_list.append(bbox_corner)\n bbox_edge_start_ind = [0, 0, 2, 1, 3, 3, 5, 6, 0, 1, 4, 2]\n bbox_edge_end_ind = [1, 2, 4, 4, 6, 5, 7, 7, 3, 6, 7, 5]\n bbox_start = bbox_corner[bbox_edge_start_ind]\n bbox_end = bbox_corner[bbox_edge_end_ind]\n bbox_edge_start_list.append(bbox_start)\n bbox_edge_dir_list.append(bbox_end - bbox_start)\n bbox_color = color\n bbox_color[-1] = bbox_alpha\n bbox_color_list.append(np.tile(bbox_color[None, :], [12, 1]))\n\n if len(bbox_corner_list) > 0:\n bbox_color_list = np.concatenate(bbox_color_list, 0)\n bbox_edge_start_list = np.concatenate(bbox_edge_start_list, 0)\n bbox_edge_dir_list = np.concatenate(bbox_edge_dir_list, 0)\n arrow_tuples = (bbox_edge_start_list, bbox_edge_dir_list)\n else:\n bbox_color_list, bbox_edge_start_list, bbox_edge_dir_list = None, None, None\n arrow_tuples = None\n\n rgb0 = render(\n mesh_list=mesh_list,\n mesh_color_list=mesh_color_list,\n cam_dist=cam_dist,\n cam_angle_pitch=pitch,\n cam_angle_yaw=yaw,\n shape=shape,\n light_intensity=light_intensity,\n light_vertical_angle=light_vertical_angle,\n render_flags=render_flags,\n )\n if viz_box:\n rgb1 = render(\n pcl_list=bbox_corner_list,\n pcl_color_list=pcl_color_list,\n pcl_radius_list=[bbox_thickness * 2.0] * len(bbox_corner_list),\n # arrows\n arrow_head=False,\n arrow_tuples=arrow_tuples,\n arrow_colors=bbox_color_list,\n arrow_radius=bbox_thickness,\n cam_dist=cam_dist,\n cam_angle_pitch=pitch,\n cam_angle_yaw=yaw,\n shape=shape,\n light_intensity=light_intensity,\n light_vertical_angle=light_vertical_angle,\n render_flags=render_flags,\n )\n # # debug\n # imageio.imsave(\"./dbg.png\", rgb)\n rgb = np.concatenate([rgb0, rgb1], cat_dim)\n else:\n rgb = rgb0\n ret.append(rgb)\n else:\n dummy = np.ones((shape[0], shape[1] * 2, 3), dtype=np.uint8) * 127\n ret = [dummy] * viz_frame_N\n # imageio.mimsave(\"./debug/dbg.gif\", ret, fps=10)\n ret = ret + ret[::-1]\n return ret"
}
] | import sys, os, os.path as osp
import yaml, logging, imageio, torch, os
import os.path as osp
import numpy as np
import networkx as nx
import trimesh
import pickle
from matplotlib.axes._axes import _log as matplotlib_axes_logger
from tqdm import tqdm
from sklearn.neighbors import NearestNeighbors
from copy import deepcopy
from object_utils.arti_graph_utils_v3 import get_G_from_VE
from object_utils.arti_viz_utils import append_mesh_to_G, viz_G_topology, viz_G
from multiprocessing import Pool | 3,918 | # helpers for Save the generated V, E
sys.path.append(osp.dirname(os.getcwd()))
matplotlib_axes_logger.setLevel("ERROR")
device = torch.device("cuda:0")
# import multiprocessing
def extract_recon_mesh_for_nodes(G, extract_fn):
for v, v_data in G.nodes(data=True):
if "additional" not in v_data:
continue
z = v_data["additional"][None, :]
mesh = extract_fn(torch.from_numpy(z).cuda())
mesh_centroid = mesh.bounds.mean(0)
mesh.apply_translation(-mesh_centroid)
bbox = v_data["bbox"].copy()
scale = 2.0 * np.linalg.norm(bbox) / np.linalg.norm(mesh.bounds[1] - mesh.bounds[0])
mesh.apply_scale(scale)
nx.set_node_attributes(G, {v: {"mesh": mesh}})
return G
def find_nn_database_mesh_and_update_G(G, database, mesh_names, mesh_dir):
for v, v_data in G.nodes(data=True):
if "additional" not in v_data:
continue
z = v_data["additional"][None, :]
# find nn
_d, _ind = database.kneighbors(z, return_distance=True)
# print(_ind)
_ind = int(_ind.squeeze(0))
fn = osp.join(mesh_dir, mesh_names[int(_ind)] + ".off")
gt_mesh = trimesh.load(fn, force="mesh", process=False) # ! debug
mesh_centroid = gt_mesh.bounds.mean(0)
gt_mesh.apply_translation(-mesh_centroid)
bbox = v_data["bbox"].copy()
scale = 2.0 * np.linalg.norm(bbox) / np.linalg.norm(gt_mesh.bounds[1] - gt_mesh.bounds[0])
gt_mesh.apply_scale(scale)
nx.set_node_attributes(G, {v: {"mesh": gt_mesh}})
return G
def _save_viz_thread(p):
G, save_dir, name = p
# print(p)
viz_dir = save_dir + "_viz"
os.makedirs(viz_dir, exist_ok=True)
os.makedirs(save_dir, exist_ok=True)
# viz_topo = viz_G_topology(G)
# imageio.imwrite(osp.join(viz_dir, f"{name}.png"), viz_topo)
# print("start rendering...")
| # helpers for Save the generated V, E
sys.path.append(osp.dirname(os.getcwd()))
matplotlib_axes_logger.setLevel("ERROR")
device = torch.device("cuda:0")
# import multiprocessing
def extract_recon_mesh_for_nodes(G, extract_fn):
for v, v_data in G.nodes(data=True):
if "additional" not in v_data:
continue
z = v_data["additional"][None, :]
mesh = extract_fn(torch.from_numpy(z).cuda())
mesh_centroid = mesh.bounds.mean(0)
mesh.apply_translation(-mesh_centroid)
bbox = v_data["bbox"].copy()
scale = 2.0 * np.linalg.norm(bbox) / np.linalg.norm(mesh.bounds[1] - mesh.bounds[0])
mesh.apply_scale(scale)
nx.set_node_attributes(G, {v: {"mesh": mesh}})
return G
def find_nn_database_mesh_and_update_G(G, database, mesh_names, mesh_dir):
for v, v_data in G.nodes(data=True):
if "additional" not in v_data:
continue
z = v_data["additional"][None, :]
# find nn
_d, _ind = database.kneighbors(z, return_distance=True)
# print(_ind)
_ind = int(_ind.squeeze(0))
fn = osp.join(mesh_dir, mesh_names[int(_ind)] + ".off")
gt_mesh = trimesh.load(fn, force="mesh", process=False) # ! debug
mesh_centroid = gt_mesh.bounds.mean(0)
gt_mesh.apply_translation(-mesh_centroid)
bbox = v_data["bbox"].copy()
scale = 2.0 * np.linalg.norm(bbox) / np.linalg.norm(gt_mesh.bounds[1] - gt_mesh.bounds[0])
gt_mesh.apply_scale(scale)
nx.set_node_attributes(G, {v: {"mesh": gt_mesh}})
return G
def _save_viz_thread(p):
G, save_dir, name = p
# print(p)
viz_dir = save_dir + "_viz"
os.makedirs(viz_dir, exist_ok=True)
os.makedirs(save_dir, exist_ok=True)
# viz_topo = viz_G_topology(G)
# imageio.imwrite(osp.join(viz_dir, f"{name}.png"), viz_topo)
# print("start rendering...") | viz_list = viz_G(G, cam_dist=3.0, viz_frame_N=5, shape=(128, 128)) | 3 | 2023-10-22 03:46:35+00:00 | 8k |
yongliang-wu/ExploreCfg | open_flamingo/src/factory.py | [
{
"identifier": "Flamingo",
"path": "open_flamingo/src/flamingo.py",
"snippet": "class Flamingo(nn.Module):\n def __init__(\n self,\n vision_encoder: nn.Module,\n lang_encoder: nn.Module,\n eoc_token_id: int,\n media_token_id: int,\n vis_dim: int,\n cross_attn_every_n_layers: int = 1,\n use_media_placement_augmentation: bool = False,\n ):\n \"\"\"\n Args:\n vision_encoder (nn.Module): HF CLIPModel\n lang_encoder (nn.Module): HF causal language model\n eoc_token_id (int): Token id for <|endofchunk|>\n media_token_id (int): Token id for <image>\n vis_dim (int): Dimension of the visual features.\n Visual features are projected to match this shape along the last dimension.\n cross_attn_every_n_layers (int, optional): How often to apply cross attention after transformer layer. Defaults to 1.\n use_media_placement_augmentation (bool, optional): Whether to randomly assign images to the preceding or following text in training. Defaults to False.\n \"\"\"\n super().__init__()\n self.eoc_token_id = eoc_token_id\n self.media_token_id = media_token_id\n self.use_media_placement_augmentation = use_media_placement_augmentation\n self.vis_dim = vis_dim\n self.vision_encoder = vision_encoder\n self.perceiver = PerceiverResampler(dim=self.vis_dim)\n self.lang_encoder = lang_encoder\n self.lang_encoder.init_flamingo(\n media_token_id=media_token_id,\n vis_hidden_size=self.vis_dim,\n cross_attn_every_n_layers=cross_attn_every_n_layers,\n use_media_placement_augmentation=self.use_media_placement_augmentation,\n )\n\n def forward(\n self,\n vision_x: torch.Tensor,\n lang_x: torch.Tensor,\n attention_mask: torch.Tensor = None,\n labels: torch.Tensor = None,\n use_cached_vision_x: bool = False,\n clear_conditioned_layers: bool = True,\n past_key_values=None,\n use_cache: bool = False,\n ):\n \"\"\"\n Forward pass of Flamingo.\n\n Args:\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W) with F=1\n lang_x (torch.Tensor): Language input ids\n shape (B, T_txt)\n attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.\n labels (torch.Tensor, optional): Labels. Defaults to None.\n clear_conditioned_layers: if True, clear the conditioned layers\n once the foward pass is completed. Set this to false if the\n same set of images will be reused in another subsequent\n forward pass.\n past_key_values: pre-computed values to pass to language model.\n See past_key_values documentation in Hugging Face\n CausalLM models.\n use_cache: whether to use cached key values. See use_cache\n documentation in Hugging Face CausalLM models.\n \"\"\"\n assert (\n vision_x is not None\n ) or use_cached_vision_x, (\n \"Must provide either vision_x or use_cached_vision_x to True.\"\n )\n\n if use_cached_vision_x:\n # Case: use cached; vision_x should be cached and other\n # vision-related inputs should not be provided.\n assert (\n vision_x is None\n ), \"Expect vision_x to be None when use_cached_vision_x is True.\"\n assert self.lang_encoder.is_conditioned()\n\n else:\n # Case: do not use caching (i.e. this is a standard forward pass);\n self._encode_vision_x(vision_x=vision_x)\n\n output = self.lang_encoder(\n input_ids=lang_x,\n attention_mask=attention_mask,\n labels=labels,\n past_key_values=past_key_values,\n use_cache=use_cache,\n )\n\n if clear_conditioned_layers:\n self.lang_encoder.clear_conditioned_layers()\n\n return output\n\n def generate(\n self,\n vision_x: torch.Tensor,\n lang_x: torch.Tensor,\n attention_mask: torch.Tensor = None,\n num_beams=1,\n max_new_tokens=None,\n temperature=1.0,\n top_k=0,\n top_p=1.0,\n no_repeat_ngram_size=0,\n prefix_allowed_tokens_fn=None,\n length_penalty=1.0,\n num_return_sequences=1,\n do_sample=False,\n early_stopping=False,\n ):\n \"\"\"\n Generate text conditioned on vision and language inputs.\n\n Args:\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W)\n images in the same chunk are collated along T_img, and frames are collated along F\n currently only F=1 is supported (single-frame videos)\n lang_x (torch.Tensor): Language input\n shape (B, T_txt)\n max_length (int, optional): Maximum length of the output. Defaults to None.\n attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.\n num_beams (int, optional): Number of beams. Defaults to 1.\n max_new_tokens (int, optional): Maximum new tokens. Defaults to None.\n temperature (float, optional): Temperature. Defaults to 1.0.\n top_k (int, optional): Top k. Defaults to 0.\n top_p (float, optional): Top p. Defaults to 1.0.\n no_repeat_ngram_size (int, optional): No repeat ngram size. Defaults to 0.\n length_penalty (float, optional): Length penalty. Defaults to 1.0.\n num_return_sequences (int, optional): Number of return sequences. Defaults to 1.\n do_sample (bool, optional): Do sample. Defaults to False.\n early_stopping (bool, optional): Early stopping. Defaults to False.\n Returns:\n torch.Tensor: lang_x with generated tokens appended to it\n \"\"\"\n if num_beams > 1:\n vision_x = vision_x.repeat_interleave(num_beams, dim=0)\n\n self._encode_vision_x(vision_x=vision_x)\n\n output = self.lang_encoder.generate(\n lang_x,\n attention_mask=attention_mask,\n eos_token_id=self.eoc_token_id,\n num_beams=num_beams,\n max_new_tokens=max_new_tokens,\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,\n no_repeat_ngram_size=no_repeat_ngram_size,\n length_penalty=length_penalty,\n num_return_sequences=num_return_sequences,\n do_sample=do_sample,\n early_stopping=early_stopping,\n )\n\n self.lang_encoder.clear_conditioned_layers()\n return output\n\n def _encode_vision_x(self, vision_x: torch.Tensor):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n\n assert vision_x.ndim == 6, \"vision_x should be of shape (b, T_img, F, C, H, W)\"\n b, T, F = vision_x.shape[:3]\n assert F == 1, \"Only single frame supported\"\n\n vision_x = rearrange(vision_x, \"b T F c h w -> (b T F) c h w\")\n with torch.no_grad():\n vision_x = self.vision_encoder.visual(vision_x)[1]\n vision_x = rearrange(vision_x, \"(b T F) v d -> b T F v d\", b=b, T=T, F=F)\n\n vision_x = self.perceiver(vision_x) # reshapes to (b, T, n, d)\n\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)"
},
{
"identifier": "FlamingoLMMixin",
"path": "open_flamingo/src/flamingo_lm.py",
"snippet": "class FlamingoLMMixin(nn.Module):\n \"\"\"\n Mixin to add cross-attention layers to a language model.\n \"\"\"\n\n def set_decoder_layers_attr_name(self, decoder_layers_attr_name):\n self.decoder_layers_attr_name = decoder_layers_attr_name\n\n def _get_decoder_layers(self):\n return getattr_recursive(self, self.decoder_layers_attr_name)\n\n def _set_decoder_layers(self, value):\n setattr_recursive(self, self.decoder_layers_attr_name, value)\n\n def init_flamingo(\n self,\n media_token_id,\n vis_hidden_size,\n cross_attn_every_n_layers,\n use_media_placement_augmentation,\n ):\n \"\"\"\n Initialize Flamingo by adding a new gated cross attn to the decoder. Store the media token id for computing the media locations.\n \"\"\"\n\n self.gated_cross_attn_layers = nn.ModuleList(\n [\n GatedCrossAttentionBlock(\n dim=self.config.hidden_size, dim_visual=vis_hidden_size\n )\n if (layer_idx + 1) % cross_attn_every_n_layers == 0\n else None\n for layer_idx, _ in enumerate(self._get_decoder_layers())\n ]\n )\n self._set_decoder_layers(\n nn.ModuleList(\n [\n FlamingoLayer(gated_cross_attn_layer, decoder_layer)\n for gated_cross_attn_layer, decoder_layer in zip(\n self.gated_cross_attn_layers, self._get_decoder_layers()\n )\n ]\n )\n )\n self.media_token_id = media_token_id\n self.use_media_placement_augmentation = use_media_placement_augmentation\n self.initialized_flamingo = True\n\n def forward(self, *input, **kwargs):\n \"\"\"Condition the Flamingo layers on the media locations before forward()\"\"\"\n if not self.initialized_flamingo:\n raise ValueError(\n \"Flamingo layers are not initialized. Please call `init_flamingo` first.\"\n )\n\n input_ids = kwargs[\"input_ids\"] if \"input_ids\" in kwargs else input[0]\n media_locations = input_ids == self.media_token_id\n attend_previous = (\n (random.random() < 0.5) if self.use_media_placement_augmentation else False\n )\n\n for layer in self.get_decoder().layers:\n layer.condition_media_locations(media_locations)\n layer.condition_attend_previous(attend_previous)\n\n return super().forward(\n *input, **kwargs\n ) # Call the other parent's forward method\n\n def is_conditioned(self) -> bool:\n \"\"\"Check whether all decoder layers are already conditioned.\"\"\"\n return all(l.is_conditioned() for l in self._get_decoder_layers())\n\n def clear_conditioned_layers(self):\n for layer in self._get_decoder_layers():\n layer.condition_vis_x(None)\n layer.condition_media_locations(None)\n layer.condition_attend_previous(None)"
},
{
"identifier": "extend_instance",
"path": "open_flamingo/src/utils.py",
"snippet": "def extend_instance(obj, mixin):\n \"\"\"Apply mixins to a class instance after creation\"\"\"\n base_cls = obj.__class__\n base_cls_name = obj.__class__.__name__\n obj.__class__ = type(\n base_cls_name, (mixin, base_cls), {}\n ) # mixin needs to go first for our forward() logic to work"
}
] | from transformers import AutoModelForCausalLM, AutoTokenizer
from typing import Literal, Optional
from .flamingo import Flamingo
from .flamingo_lm import FlamingoLMMixin
from .utils import extend_instance
from open_clip import transformer
from torch.nn import functional as F
import open_clip
import torch | 3,641 |
def LNormforward(self, x: torch.Tensor):
#x = F.layer_norm(x.to(torch.float32), self.normalized_shape, self.weight, self.bias, self.eps)
return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
transformer.LayerNormFp32.forward = LNormforward
def create_model_and_transforms(
clip_vision_encoder_path: str,
clip_vision_encoder_pretrained: str,
lang_encoder_path: str,
tokenizer_path: str,
cross_attn_every_n_layers: int = 1,
use_local_files: bool = False,
decoder_layers_attr_name: Optional[str] = None,
inference: bool = False,
precision: Literal["fp16","fp32"] = "fp32",
device: str = "cpu",
checkpoint_path: Optional[str] = None,
**flamingo_kwargs,
):
"""
Initialize a Flamingo model from a pretrained vision encoder and language encoder.
Appends special tokens to the tokenizer and freezes backbones.
Args:
clip_vision_encoder_path (str): path to pretrained clip model (e.g. "ViT-B-32")
clip_vision_encoder_pretrained (str): name of pretraining dataset for clip model (e.g. "laion2b_s32b_b79k")
lang_encoder_path (str): path to pretrained language encoder
tokenizer_path (str): path to pretrained tokenizer
cross_attn_every_n_layers (int, optional): determines how often to add a cross-attention layer. Defaults to 1.
use_local_files (bool, optional): whether to use local files. Defaults to False.
decoder_layers_attr_name (str, optional): name of the decoder layers attribute. Defaults to None.
inference (bool, optional): whether to use inference mode. Defaults to True.
precision (str, optional): precision to use. Defaults to "fp16".
device (str, optional): device to use. Defaults to "cuda".
checkpoint_path (str, optional): path to flamingo checkpoint. Defaults to None.
Returns:
Flamingo: Flamingo model from pretrained vision and language encoders
Image processor: Pipeline to preprocess input images
Tokenizer: A tokenizer for the language model
"""
vision_encoder, _, image_processor = open_clip.create_model_and_transforms(
clip_vision_encoder_path, pretrained=clip_vision_encoder_pretrained,
precision=precision, device=device
)
# set the vision encoder to output the visual features
vision_encoder.visual.output_tokens = True
text_tokenizer = AutoTokenizer.from_pretrained(
tokenizer_path, local_files_only=use_local_files
)
# add Flamingo special tokens to the tokenizer
text_tokenizer.add_special_tokens(
{"additional_special_tokens": ["<|endofchunk|>", "<image>"]}
)
if text_tokenizer.pad_token is None:
# Issue: GPT models don't have a pad token, which we use to
# modify labels for the loss.
text_tokenizer.add_special_tokens({"pad_token": "<PAD>"})
dtype = torch.float16 if precision == "fp16" else torch.float32
lang_encoder = AutoModelForCausalLM.from_pretrained(
lang_encoder_path, local_files_only=use_local_files,
torch_dtype=dtype, # DO NOT EVER USE device_map HERE IT WILL CAUSE HORROR
).to(device)
extend_instance(lang_encoder, FlamingoLMMixin)
if decoder_layers_attr_name is None:
decoder_layers_attr_name = _infer_decoder_layers_attr_name(lang_encoder)
lang_encoder.set_decoder_layers_attr_name(decoder_layers_attr_name)
lang_encoder.resize_token_embeddings(len(text_tokenizer))
|
def LNormforward(self, x: torch.Tensor):
#x = F.layer_norm(x.to(torch.float32), self.normalized_shape, self.weight, self.bias, self.eps)
return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
transformer.LayerNormFp32.forward = LNormforward
def create_model_and_transforms(
clip_vision_encoder_path: str,
clip_vision_encoder_pretrained: str,
lang_encoder_path: str,
tokenizer_path: str,
cross_attn_every_n_layers: int = 1,
use_local_files: bool = False,
decoder_layers_attr_name: Optional[str] = None,
inference: bool = False,
precision: Literal["fp16","fp32"] = "fp32",
device: str = "cpu",
checkpoint_path: Optional[str] = None,
**flamingo_kwargs,
):
"""
Initialize a Flamingo model from a pretrained vision encoder and language encoder.
Appends special tokens to the tokenizer and freezes backbones.
Args:
clip_vision_encoder_path (str): path to pretrained clip model (e.g. "ViT-B-32")
clip_vision_encoder_pretrained (str): name of pretraining dataset for clip model (e.g. "laion2b_s32b_b79k")
lang_encoder_path (str): path to pretrained language encoder
tokenizer_path (str): path to pretrained tokenizer
cross_attn_every_n_layers (int, optional): determines how often to add a cross-attention layer. Defaults to 1.
use_local_files (bool, optional): whether to use local files. Defaults to False.
decoder_layers_attr_name (str, optional): name of the decoder layers attribute. Defaults to None.
inference (bool, optional): whether to use inference mode. Defaults to True.
precision (str, optional): precision to use. Defaults to "fp16".
device (str, optional): device to use. Defaults to "cuda".
checkpoint_path (str, optional): path to flamingo checkpoint. Defaults to None.
Returns:
Flamingo: Flamingo model from pretrained vision and language encoders
Image processor: Pipeline to preprocess input images
Tokenizer: A tokenizer for the language model
"""
vision_encoder, _, image_processor = open_clip.create_model_and_transforms(
clip_vision_encoder_path, pretrained=clip_vision_encoder_pretrained,
precision=precision, device=device
)
# set the vision encoder to output the visual features
vision_encoder.visual.output_tokens = True
text_tokenizer = AutoTokenizer.from_pretrained(
tokenizer_path, local_files_only=use_local_files
)
# add Flamingo special tokens to the tokenizer
text_tokenizer.add_special_tokens(
{"additional_special_tokens": ["<|endofchunk|>", "<image>"]}
)
if text_tokenizer.pad_token is None:
# Issue: GPT models don't have a pad token, which we use to
# modify labels for the loss.
text_tokenizer.add_special_tokens({"pad_token": "<PAD>"})
dtype = torch.float16 if precision == "fp16" else torch.float32
lang_encoder = AutoModelForCausalLM.from_pretrained(
lang_encoder_path, local_files_only=use_local_files,
torch_dtype=dtype, # DO NOT EVER USE device_map HERE IT WILL CAUSE HORROR
).to(device)
extend_instance(lang_encoder, FlamingoLMMixin)
if decoder_layers_attr_name is None:
decoder_layers_attr_name = _infer_decoder_layers_attr_name(lang_encoder)
lang_encoder.set_decoder_layers_attr_name(decoder_layers_attr_name)
lang_encoder.resize_token_embeddings(len(text_tokenizer))
| model = Flamingo( | 0 | 2023-10-18 02:38:00+00:00 | 8k |
vorausrobotik/voraus-ad-dataset | train.py | [
{
"identifier": "Configuration",
"path": "configuration.py",
"snippet": "class Configuration(BaseModel):\n \"\"\"Describes the configuration parameters.\"\"\"\n\n seed: int\n epochs: int\n batchsize: int\n n_hidden_layers: int = Field(alias=\"nHiddenLayers\")\n n_coupling_blocks: int = Field(alias=\"nCouplingBlocks\")\n scale: int\n columns: Literal[\"machine\", \"mechanical\", \"electrical\", \"computed\", \"measured\"]\n clamp: float\n pad: bool\n frequency_divider: int = Field(alias=\"frequencyDivider\")\n train_gain: float = Field(alias=\"trainGain\")\n normalize: bool\n kernel_size_1: int = Field(alias=\"kernelSize1\")\n dilation_1: int = Field(alias=\"dilation1\")\n kernel_size_2: int = Field(alias=\"kernelSize2\")\n dilation_2: int = Field(alias=\"dilation2\")\n kernel_size_3: int = Field(alias=\"kernelSize3\")\n dilation_3: int = Field(alias=\"dilation3\")\n milestones: list[int]\n gamma: float\n learning_rate: float = Field(alias=\"learningRate\")"
},
{
"identifier": "NormalizingFlow",
"path": "normalizing_flow.py",
"snippet": "class NormalizingFlow(GraphINN):\r\n \"\"\"Describes the normalizing flow model.\"\"\"\r\n\r\n def __init__(self, input_dimension: Tuple[int, ...], config: Configuration) -> None:\r\n \"\"\"Initializes the normalizing flow model.\r\n\r\n Args:\r\n input_dimension: The input dimensions.\r\n config: The configuration of the model.\r\n \"\"\"\r\n nodes = [InputNode(*input_dimension, name=\"input\")]\r\n\r\n int_network = InternalNetwork.setup(\r\n input_dimension[1],\r\n input_dimension[0],\r\n n_hidden_layers=config.n_hidden_layers,\r\n scale=config.scale,\r\n kernel_size_1=config.kernel_size_1,\r\n dilation_1=config.dilation_1,\r\n kernel_size_2=config.kernel_size_2,\r\n dilation_2=config.dilation_2,\r\n kernel_size_3=config.kernel_size_3,\r\n dilation_3=config.dilation_3,\r\n )\r\n\r\n for cbi in range(config.n_coupling_blocks):\r\n kwargs: Dict[Any, Any] = {}\r\n\r\n nodes.append(\r\n Node(nodes[-1], PermuteRandom, kwargs, name=f\"permute{cbi}\"),\r\n )\r\n nodes.append(\r\n Node(\r\n nodes[-1],\r\n CouplingBlock,\r\n {\r\n \"subnet_constructor\": int_network.constructor,\r\n \"clamp\": config.clamp,\r\n },\r\n name=f\"cb{cbi}\",\r\n )\r\n )\r\n\r\n output_node = OutputNode(nodes[-1], name=\"output\")\r\n nodes.append(output_node)\r\n\r\n super().__init__(nodes)\r"
},
{
"identifier": "get_loss",
"path": "normalizing_flow.py",
"snippet": "def get_loss(z_space: Tensor, jac: Tensor) -> Tensor:\r\n \"\"\"Calculate the loss of a batch.\r\n\r\n Computes the negative log likelihood loss (per dimension) assuming z should be Gaussian.\r\n\r\n Args:\r\n z_space: The batch result.\r\n jac: The jacobian matrix.\r\n\r\n Returns:\r\n The loss of the batch.\r\n \"\"\"\r\n sum_dimension = tuple(range(1, z_space.dim()))\r\n number = numpy.prod(z_space.shape[1:])\r\n return torch.mean(torch.sum(z_space**2, dim=sum_dimension) - jac) / number\r"
},
{
"identifier": "get_loss_per_sample",
"path": "normalizing_flow.py",
"snippet": "def get_loss_per_sample(z_space: Tensor, jac: Tensor) -> Tensor:\r\n \"\"\"Calculates the loss per sample.\r\n\r\n Args:\r\n z_space: The batch result.\r\n jac: The jacobian matrix.\r\n\r\n Returns:\r\n The loss per sample.\r\n \"\"\"\r\n sum_dimension = tuple(range(1, z_space.dim()))\r\n loss = 0.5 * torch.sum(z_space**2, dim=sum_dimension) - jac\r\n return loss\r"
},
{
"identifier": "ANOMALY_CATEGORIES",
"path": "voraus_ad.py",
"snippet": "ANOMALY_CATEGORIES = [\n Category.AXIS_FRICTION,\n Category.AXIS_WEIGHT,\n Category.COLLISION_FOAM,\n Category.COLLISION_CABLE,\n Category.COLLISION_CARTON,\n Category.MISS_CAN,\n Category.LOSE_CAN,\n Category.CAN_WEIGHT,\n Category.ENTANGLED,\n Category.INVALID_POSITION,\n Category.MOTOR_COMMUTATION,\n Category.WOBBLING_STATION,\n]"
},
{
"identifier": "Signals",
"path": "voraus_ad.py",
"snippet": "class Signals:\n \"\"\"Contains the signals of the robot used in the dataset.\"\"\"\n\n TIME = \"time\"\n SAMPLE = \"sample\"\n ANOMALY = \"anomaly\"\n CATEGORY = \"category\"\n SETTING = \"setting\"\n ACTION = \"action\"\n ACTIVE = \"active\"\n ROBOT_VOLTAGE = \"robot_voltage\"\n ROBOT_CURRENT = \"robot_current\"\n IO_CURRENT = \"io_current\"\n SYSTEM_CURRENT = \"system_current\"\n TARGET_POSITION_1 = \"target_position_1\"\n TARGET_VELOCITY_1 = \"target_velocity_1\"\n TARGET_ACCELERATION_1 = \"target_acceleration_1\"\n TARGET_TORQUE_1 = \"target_torque_1\"\n COMPUTED_INERTIA_1 = \"computed_inertia_1\"\n COMPUTED_TORQUE_1 = \"computed_torque_1\"\n MOTOR_POSITION_1 = \"motor_position_1\"\n MOTOR_VELOCITY_1 = \"motor_velocity_1\"\n JOINT_POSITION_1 = \"joint_position_1\"\n JOINT_VELOCITY_1 = \"joint_velocity_1\"\n MOTOR_TORQUE_1 = \"motor_torque_1\"\n TORQUE_SENSOR_A_1 = \"torque_sensor_a_1\"\n TORQUE_SENSOR_B_1 = \"torque_sensor_b_1\"\n MOTOR_IQ_1 = \"motor_iq_1\"\n MOTOR_ID_1 = \"motor_id_1\"\n POWER_MOTOR_EL_1 = \"power_motor_el_1\"\n POWER_MOTOR_MECH_1 = \"power_motor_mech_1\"\n POWER_LOAD_MECH_1 = \"power_load_mech_1\"\n MOTOR_VOLTAGE_1 = \"motor_voltage_1\"\n SUPPLY_VOLTAGE_1 = \"supply_voltage_1\"\n BRAKE_VOLTAGE_1 = \"brake_voltage_1\"\n TARGET_POSITION_2 = \"target_position_2\"\n TARGET_VELOCITY_2 = \"target_velocity_2\"\n TARGET_ACCELERATION_2 = \"target_acceleration_2\"\n TARGET_TORQUE_2 = \"target_torque_2\"\n COMPUTED_INERTIA_2 = \"computed_inertia_2\"\n COMPUTED_TORQUE_2 = \"computed_torque_2\"\n MOTOR_POSITION_2 = \"motor_position_2\"\n MOTOR_VELOCITY_2 = \"motor_velocity_2\"\n JOINT_POSITION_2 = \"joint_position_2\"\n JOINT_VELOCITY_2 = \"joint_velocity_2\"\n MOTOR_TORQUE_2 = \"motor_torque_2\"\n TORQUE_SENSOR_A_2 = \"torque_sensor_a_2\"\n TORQUE_SENSOR_B_2 = \"torque_sensor_b_2\"\n MOTOR_IQ_2 = \"motor_iq_2\"\n MOTOR_ID_2 = \"motor_id_2\"\n POWER_MOTOR_EL_2 = \"power_motor_el_2\"\n POWER_MOTOR_MECH_2 = \"power_motor_mech_2\"\n POWER_LOAD_MECH_2 = \"power_load_mech_2\"\n MOTOR_VOLTAGE_2 = \"motor_voltage_2\"\n SUPPLY_VOLTAGE_2 = \"supply_voltage_2\"\n BRAKE_VOLTAGE_2 = \"brake_voltage_2\"\n TARGET_POSITION_3 = \"target_position_3\"\n TARGET_VELOCITY_3 = \"target_velocity_3\"\n TARGET_ACCELERATION_3 = \"target_acceleration_3\"\n TARGET_TORQUE_3 = \"target_torque_3\"\n COMPUTED_INERTIA_3 = \"computed_inertia_3\"\n COMPUTED_TORQUE_3 = \"computed_torque_3\"\n MOTOR_POSITION_3 = \"motor_position_3\"\n MOTOR_VELOCITY_3 = \"motor_velocity_3\"\n JOINT_POSITION_3 = \"joint_position_3\"\n JOINT_VELOCITY_3 = \"joint_velocity_3\"\n MOTOR_TORQUE_3 = \"motor_torque_3\"\n TORQUE_SENSOR_A_3 = \"torque_sensor_a_3\"\n TORQUE_SENSOR_B_3 = \"torque_sensor_b_3\"\n MOTOR_IQ_3 = \"motor_iq_3\"\n MOTOR_ID_3 = \"motor_id_3\"\n POWER_MOTOR_EL_3 = \"power_motor_el_3\"\n POWER_MOTOR_MECH_3 = \"power_motor_mech_3\"\n POWER_LOAD_MECH_3 = \"power_load_mech_3\"\n MOTOR_VOLTAGE_3 = \"motor_voltage_3\"\n SUPPLY_VOLTAGE_3 = \"supply_voltage_3\"\n BRAKE_VOLTAGE_3 = \"brake_voltage_3\"\n TARGET_POSITION_4 = \"target_position_4\"\n TARGET_VELOCITY_4 = \"target_velocity_4\"\n TARGET_ACCELERATION_4 = \"target_acceleration_4\"\n TARGET_TORQUE_4 = \"target_torque_4\"\n COMPUTED_INERTIA_4 = \"computed_inertia_4\"\n COMPUTED_TORQUE_4 = \"computed_torque_4\"\n MOTOR_POSITION_4 = \"motor_position_4\"\n MOTOR_VELOCITY_4 = \"motor_velocity_4\"\n JOINT_POSITION_4 = \"joint_position_4\"\n JOINT_VELOCITY_4 = \"joint_velocity_4\"\n MOTOR_TORQUE_4 = \"motor_torque_4\"\n TORQUE_SENSOR_A_4 = \"torque_sensor_a_4\"\n TORQUE_SENSOR_B_4 = \"torque_sensor_b_4\"\n MOTOR_IQ_4 = \"motor_iq_4\"\n MOTOR_ID_4 = \"motor_id_4\"\n POWER_MOTOR_EL_4 = \"power_motor_el_4\"\n POWER_MOTOR_MECH_4 = \"power_motor_mech_4\"\n POWER_LOAD_MECH_4 = \"power_load_mech_4\"\n MOTOR_VOLTAGE_4 = \"motor_voltage_4\"\n SUPPLY_VOLTAGE_4 = \"supply_voltage_4\"\n BRAKE_VOLTAGE_4 = \"brake_voltage_4\"\n TARGET_POSITION_5 = \"target_position_5\"\n TARGET_VELOCITY_5 = \"target_velocity_5\"\n TARGET_ACCELERATION_5 = \"target_acceleration_5\"\n TARGET_TORQUE_5 = \"target_torque_5\"\n COMPUTED_INERTIA_5 = \"computed_inertia_5\"\n COMPUTED_TORQUE_5 = \"computed_torque_5\"\n MOTOR_POSITION_5 = \"motor_position_5\"\n MOTOR_VELOCITY_5 = \"motor_velocity_5\"\n JOINT_POSITION_5 = \"joint_position_5\"\n JOINT_VELOCITY_5 = \"joint_velocity_5\"\n MOTOR_TORQUE_5 = \"motor_torque_5\"\n TORQUE_SENSOR_A_5 = \"torque_sensor_a_5\"\n TORQUE_SENSOR_B_5 = \"torque_sensor_b_5\"\n MOTOR_IQ_5 = \"motor_iq_5\"\n MOTOR_ID_5 = \"motor_id_5\"\n POWER_MOTOR_EL_5 = \"power_motor_el_5\"\n POWER_MOTOR_MECH_5 = \"power_motor_mech_5\"\n POWER_LOAD_MECH_5 = \"power_load_mech_5\"\n MOTOR_VOLTAGE_5 = \"motor_voltage_5\"\n SUPPLY_VOLTAGE_5 = \"supply_voltage_5\"\n BRAKE_VOLTAGE_5 = \"brake_voltage_5\"\n TARGET_POSITION_6 = \"target_position_6\"\n TARGET_VELOCITY_6 = \"target_velocity_6\"\n TARGET_ACCELERATION_6 = \"target_acceleration_6\"\n TARGET_TORQUE_6 = \"target_torque_6\"\n COMPUTED_INERTIA_6 = \"computed_inertia_6\"\n COMPUTED_TORQUE_6 = \"computed_torque_6\"\n MOTOR_POSITION_6 = \"motor_position_6\"\n MOTOR_VELOCITY_6 = \"motor_velocity_6\"\n JOINT_POSITION_6 = \"joint_position_6\"\n JOINT_VELOCITY_6 = \"joint_velocity_6\"\n MOTOR_TORQUE_6 = \"motor_torque_6\"\n TORQUE_SENSOR_A_6 = \"torque_sensor_a_6\"\n TORQUE_SENSOR_B_6 = \"torque_sensor_b_6\"\n MOTOR_IQ_6 = \"motor_iq_6\"\n MOTOR_ID_6 = \"motor_id_6\"\n POWER_MOTOR_EL_6 = \"power_motor_el_6\"\n POWER_MOTOR_MECH_6 = \"power_motor_mech_6\"\n POWER_LOAD_MECH_6 = \"power_load_mech_6\"\n MOTOR_VOLTAGE_6 = \"motor_voltage_6\"\n SUPPLY_VOLTAGE_6 = \"supply_voltage_6\"\n BRAKE_VOLTAGE_6 = \"brake_voltage_6\"\n\n @classmethod\n def all(cls) -> tuple[str, ...]:\n \"\"\"Returns all signals (machine data and meta) included in the voraus-AD dataset.\n\n Returns:\n All signals of the voraus-AD dataset.\n \"\"\"\n return (\n cls.TIME,\n cls.SAMPLE,\n cls.ANOMALY,\n cls.CATEGORY,\n cls.SETTING,\n cls.ACTION,\n cls.ACTIVE,\n cls.ROBOT_VOLTAGE,\n cls.ROBOT_CURRENT,\n cls.IO_CURRENT,\n cls.SYSTEM_CURRENT,\n cls.TARGET_POSITION_1,\n cls.TARGET_VELOCITY_1,\n cls.TARGET_ACCELERATION_1,\n cls.TARGET_TORQUE_1,\n cls.COMPUTED_INERTIA_1,\n cls.COMPUTED_TORQUE_1,\n cls.MOTOR_POSITION_1,\n cls.MOTOR_VELOCITY_1,\n cls.JOINT_POSITION_1,\n cls.JOINT_VELOCITY_1,\n cls.MOTOR_TORQUE_1,\n cls.TORQUE_SENSOR_A_1,\n cls.TORQUE_SENSOR_B_1,\n cls.MOTOR_IQ_1,\n cls.MOTOR_ID_1,\n cls.POWER_MOTOR_EL_1,\n cls.POWER_MOTOR_MECH_1,\n cls.POWER_LOAD_MECH_1,\n cls.MOTOR_VOLTAGE_1,\n cls.SUPPLY_VOLTAGE_1,\n cls.BRAKE_VOLTAGE_1,\n cls.TARGET_POSITION_2,\n cls.TARGET_VELOCITY_2,\n cls.TARGET_ACCELERATION_2,\n cls.TARGET_TORQUE_2,\n cls.COMPUTED_INERTIA_2,\n cls.COMPUTED_TORQUE_2,\n cls.MOTOR_POSITION_2,\n cls.MOTOR_VELOCITY_2,\n cls.JOINT_POSITION_2,\n cls.JOINT_VELOCITY_2,\n cls.MOTOR_TORQUE_2,\n cls.TORQUE_SENSOR_A_2,\n cls.TORQUE_SENSOR_B_2,\n cls.MOTOR_IQ_2,\n cls.MOTOR_ID_2,\n cls.POWER_MOTOR_EL_2,\n cls.POWER_MOTOR_MECH_2,\n cls.POWER_LOAD_MECH_2,\n cls.MOTOR_VOLTAGE_2,\n cls.SUPPLY_VOLTAGE_2,\n cls.BRAKE_VOLTAGE_2,\n cls.TARGET_POSITION_3,\n cls.TARGET_VELOCITY_3,\n cls.TARGET_ACCELERATION_3,\n cls.TARGET_TORQUE_3,\n cls.COMPUTED_INERTIA_3,\n cls.COMPUTED_TORQUE_3,\n cls.MOTOR_POSITION_3,\n cls.MOTOR_VELOCITY_3,\n cls.JOINT_POSITION_3,\n cls.JOINT_VELOCITY_3,\n cls.MOTOR_TORQUE_3,\n cls.TORQUE_SENSOR_A_3,\n cls.TORQUE_SENSOR_B_3,\n cls.MOTOR_IQ_3,\n cls.MOTOR_ID_3,\n cls.POWER_MOTOR_EL_3,\n cls.POWER_MOTOR_MECH_3,\n cls.POWER_LOAD_MECH_3,\n cls.MOTOR_VOLTAGE_3,\n cls.SUPPLY_VOLTAGE_3,\n cls.BRAKE_VOLTAGE_3,\n cls.TARGET_POSITION_4,\n cls.TARGET_VELOCITY_4,\n cls.TARGET_ACCELERATION_4,\n cls.TARGET_TORQUE_4,\n cls.COMPUTED_INERTIA_4,\n cls.COMPUTED_TORQUE_4,\n cls.MOTOR_POSITION_4,\n cls.MOTOR_VELOCITY_4,\n cls.JOINT_POSITION_4,\n cls.JOINT_VELOCITY_4,\n cls.MOTOR_TORQUE_4,\n cls.TORQUE_SENSOR_A_4,\n cls.TORQUE_SENSOR_B_4,\n cls.MOTOR_IQ_4,\n cls.MOTOR_ID_4,\n cls.POWER_MOTOR_EL_4,\n cls.POWER_MOTOR_MECH_4,\n cls.POWER_LOAD_MECH_4,\n cls.MOTOR_VOLTAGE_4,\n cls.SUPPLY_VOLTAGE_4,\n cls.BRAKE_VOLTAGE_4,\n cls.TARGET_POSITION_5,\n cls.TARGET_VELOCITY_5,\n cls.TARGET_ACCELERATION_5,\n cls.TARGET_TORQUE_5,\n cls.COMPUTED_INERTIA_5,\n cls.COMPUTED_TORQUE_5,\n cls.MOTOR_POSITION_5,\n cls.MOTOR_VELOCITY_5,\n cls.JOINT_POSITION_5,\n cls.JOINT_VELOCITY_5,\n cls.MOTOR_TORQUE_5,\n cls.TORQUE_SENSOR_A_5,\n cls.TORQUE_SENSOR_B_5,\n cls.MOTOR_IQ_5,\n cls.MOTOR_ID_5,\n cls.POWER_MOTOR_EL_5,\n cls.POWER_MOTOR_MECH_5,\n cls.POWER_LOAD_MECH_5,\n cls.MOTOR_VOLTAGE_5,\n cls.SUPPLY_VOLTAGE_5,\n cls.BRAKE_VOLTAGE_5,\n cls.TARGET_POSITION_6,\n cls.TARGET_VELOCITY_6,\n cls.TARGET_ACCELERATION_6,\n cls.TARGET_TORQUE_6,\n cls.COMPUTED_INERTIA_6,\n cls.COMPUTED_TORQUE_6,\n cls.MOTOR_POSITION_6,\n cls.MOTOR_VELOCITY_6,\n cls.JOINT_POSITION_6,\n cls.JOINT_VELOCITY_6,\n cls.MOTOR_TORQUE_6,\n cls.TORQUE_SENSOR_A_6,\n cls.TORQUE_SENSOR_B_6,\n cls.MOTOR_IQ_6,\n cls.MOTOR_ID_6,\n cls.POWER_MOTOR_EL_6,\n cls.POWER_MOTOR_MECH_6,\n cls.POWER_LOAD_MECH_6,\n cls.MOTOR_VOLTAGE_6,\n cls.SUPPLY_VOLTAGE_6,\n cls.BRAKE_VOLTAGE_6,\n )\n\n @classmethod\n def meta(cls) -> tuple[str, ...]:\n \"\"\"Returns the meta colums of the voraus-AD dataset.\n\n Returns:\n The meta columns of the dataset.\n \"\"\"\n return (\n cls.TIME,\n cls.SAMPLE,\n cls.ANOMALY,\n cls.CATEGORY,\n cls.SETTING,\n cls.ACTION,\n cls.ACTIVE,\n )\n\n @classmethod\n def meta_constant(cls) -> tuple[str, ...]:\n \"\"\"Returns time invariant meta colums of the voraus-AD dataset.\n\n Returns:\n The time invariant meta columns.\n \"\"\"\n return (\n cls.SAMPLE,\n cls.ANOMALY,\n cls.CATEGORY,\n cls.SETTING,\n )\n\n @classmethod\n def electrical(cls) -> tuple[str, ...]:\n \"\"\"Returns the part of the machine data columns, which describes electrical values.\n\n Returns:\n The electrical signals.\n \"\"\"\n return (\n cls.ROBOT_VOLTAGE,\n cls.ROBOT_CURRENT,\n cls.IO_CURRENT,\n cls.SYSTEM_CURRENT,\n cls.MOTOR_IQ_1,\n cls.MOTOR_ID_1,\n cls.POWER_MOTOR_EL_1,\n cls.MOTOR_VOLTAGE_1,\n cls.SUPPLY_VOLTAGE_1,\n cls.BRAKE_VOLTAGE_1,\n cls.MOTOR_IQ_2,\n cls.MOTOR_ID_2,\n cls.POWER_MOTOR_EL_2,\n cls.MOTOR_VOLTAGE_2,\n cls.SUPPLY_VOLTAGE_2,\n cls.BRAKE_VOLTAGE_2,\n cls.MOTOR_IQ_3,\n cls.MOTOR_ID_3,\n cls.POWER_MOTOR_EL_3,\n cls.MOTOR_VOLTAGE_3,\n cls.SUPPLY_VOLTAGE_3,\n cls.BRAKE_VOLTAGE_3,\n cls.MOTOR_IQ_4,\n cls.MOTOR_ID_4,\n cls.POWER_MOTOR_EL_4,\n cls.MOTOR_VOLTAGE_4,\n cls.SUPPLY_VOLTAGE_4,\n cls.BRAKE_VOLTAGE_4,\n cls.MOTOR_IQ_5,\n cls.MOTOR_ID_5,\n cls.POWER_MOTOR_EL_5,\n cls.MOTOR_VOLTAGE_5,\n cls.SUPPLY_VOLTAGE_5,\n cls.BRAKE_VOLTAGE_5,\n cls.MOTOR_IQ_6,\n cls.MOTOR_ID_6,\n cls.POWER_MOTOR_EL_6,\n cls.MOTOR_VOLTAGE_6,\n cls.SUPPLY_VOLTAGE_6,\n cls.BRAKE_VOLTAGE_6,\n )\n\n @classmethod\n def measured(cls) -> tuple[str, ...]:\n \"\"\"Returns the part of the machine data, which describes measured values.\n\n Returns:\n The measured signals.\n \"\"\"\n return (\n cls.ROBOT_VOLTAGE,\n cls.ROBOT_CURRENT,\n cls.IO_CURRENT,\n cls.SYSTEM_CURRENT,\n cls.MOTOR_POSITION_1,\n cls.MOTOR_VELOCITY_1,\n cls.JOINT_POSITION_1,\n cls.JOINT_VELOCITY_1,\n cls.TORQUE_SENSOR_A_1,\n cls.TORQUE_SENSOR_B_1,\n cls.MOTOR_VOLTAGE_1,\n cls.SUPPLY_VOLTAGE_1,\n cls.BRAKE_VOLTAGE_1,\n cls.MOTOR_POSITION_2,\n cls.MOTOR_VELOCITY_2,\n cls.JOINT_POSITION_2,\n cls.JOINT_VELOCITY_2,\n cls.TORQUE_SENSOR_A_2,\n cls.TORQUE_SENSOR_B_2,\n cls.MOTOR_VOLTAGE_2,\n cls.SUPPLY_VOLTAGE_2,\n cls.BRAKE_VOLTAGE_2,\n cls.MOTOR_POSITION_3,\n cls.MOTOR_VELOCITY_3,\n cls.JOINT_POSITION_3,\n cls.JOINT_VELOCITY_3,\n cls.TORQUE_SENSOR_A_3,\n cls.TORQUE_SENSOR_B_3,\n cls.MOTOR_VOLTAGE_3,\n cls.SUPPLY_VOLTAGE_3,\n cls.BRAKE_VOLTAGE_3,\n cls.MOTOR_POSITION_4,\n cls.MOTOR_VELOCITY_4,\n cls.JOINT_POSITION_4,\n cls.JOINT_VELOCITY_4,\n cls.TORQUE_SENSOR_A_4,\n cls.TORQUE_SENSOR_B_4,\n cls.MOTOR_VOLTAGE_4,\n cls.SUPPLY_VOLTAGE_4,\n cls.BRAKE_VOLTAGE_4,\n cls.MOTOR_POSITION_5,\n cls.MOTOR_VELOCITY_5,\n cls.JOINT_POSITION_5,\n cls.JOINT_VELOCITY_5,\n cls.TORQUE_SENSOR_A_5,\n cls.TORQUE_SENSOR_B_5,\n cls.MOTOR_VOLTAGE_5,\n cls.SUPPLY_VOLTAGE_5,\n cls.BRAKE_VOLTAGE_5,\n cls.MOTOR_POSITION_6,\n cls.MOTOR_VELOCITY_6,\n cls.JOINT_POSITION_6,\n cls.JOINT_VELOCITY_6,\n cls.TORQUE_SENSOR_A_6,\n cls.TORQUE_SENSOR_B_6,\n cls.MOTOR_VOLTAGE_6,\n cls.SUPPLY_VOLTAGE_6,\n cls.BRAKE_VOLTAGE_6,\n )\n\n @classmethod\n def robot(cls) -> tuple[str, ...]:\n \"\"\"Returns all columns, which are not related to the robot axes, but to the robot itself.\n\n Returns:\n The robot system signals.\n \"\"\"\n return (\n cls.ROBOT_VOLTAGE,\n cls.ROBOT_CURRENT,\n cls.IO_CURRENT,\n cls.SYSTEM_CURRENT,\n )\n\n @classmethod\n def machine(cls) -> tuple[str, ...]:\n \"\"\"Returns all columns, which are machine data.\n\n This excludes the meta columns of the dataset.\n The machine data should be used for training, it contains all available measurements and target values.\n\n Returns:\n The machine data signals.\n \"\"\"\n return tuple(s for s in cls.all() if s not in cls.meta())\n\n @classmethod\n def mechanical(cls) -> tuple[str, ...]:\n \"\"\"Returns the columns, which describe mechanical values.\n\n Returns:\n The machanical signals.\n \"\"\"\n return tuple(s for s in cls.machine() if s not in cls.electrical())\n\n @classmethod\n def computed(cls) -> tuple[str, ...]:\n \"\"\"Returns the columns, which describe computed values like targets.\n\n Returns:\n The computed signals.\n \"\"\"\n return tuple(s for s in cls.machine() if s not in cls.measured())\n\n @classmethod\n def axis(cls) -> tuple[str, ...]:\n \"\"\"Returns the columns, which describe robot axis specific values.\n\n Returns:\n The robot axis specific signals.\n \"\"\"\n signals_axis = tuple(s for s in cls.machine() if s not in cls.robot())\n number_of_axis = 6\n assert len(signals_axis) % number_of_axis == 0\n signals_per_axis = round(len(signals_axis) / number_of_axis)\n print(signals_per_axis)\n return signals_axis\n\n @classmethod\n def groups(cls) -> dict[str, tuple[str, ...]]:\n \"\"\"Access the signal groups by name.\n\n Returns:\n The signal group dictionary.\n \"\"\"\n return {\n \"mechanical\": cls.mechanical(),\n \"electrical\": cls.electrical(),\n \"computed\": cls.computed(),\n \"measured\": cls.measured(),\n \"machine\": cls.machine(), # all machine data\n }"
},
{
"identifier": "load_torch_dataloaders",
"path": "voraus_ad.py",
"snippet": "def load_torch_dataloaders( # pylint: disable=too-many-locals\n dataset: Union[Path, str],\n batch_size: int,\n seed: int,\n columns: Union[List[str], Tuple],\n normalize: bool,\n frequency_divider: int,\n train_gain: float,\n pad: bool = True,\n) -> tuple[VorausADDataset, VorausADDataset, DataLoader, DataLoader]:\n \"\"\"Loads the voraus-AD dataset (train and test) as torch data loaders and datasets.\n\n Args:\n dataset: The path to the dataset.\n batch_size: The batch size to use.\n seed: The seed o use for the dataloader random generator.\n columns: The colums to load.\n normalize: Whether to normalize the data with standard scaler or not.\n frequency_divider: Scale the dataset down by dropping every nth sample.\n train_gain: The factor of train samples to use.\n pad: Whether to use zero padding or not.\n\n Returns:\n The data loaders and datasets.\n \"\"\"\n x_train, y_train, x_test, y_test = load_torch_tensors(\n path=dataset,\n columns=columns,\n normalize=normalize,\n frequency_divider=frequency_divider,\n train_gain=train_gain,\n pad=pad,\n )\n\n train_dataset = VorausADDataset(x_train, y_train, list(columns))\n test_dataset = VorausADDataset(x_test, y_test, list(columns))\n\n generator = torch.Generator()\n generator.manual_seed(seed)\n\n train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, generator=generator)\n test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)\n\n return train_dataset, test_dataset, train_dataloader, test_dataloader"
}
] | import random
import numpy
import pandas
import torch
import torch.backends.cudnn
from pathlib import Path
from typing import Dict, List, Optional
from sklearn import metrics
from torch import optim
from configuration import Configuration
from normalizing_flow import NormalizingFlow, get_loss, get_loss_per_sample
from voraus_ad import ANOMALY_CATEGORIES, Signals, load_torch_dataloaders | 7,087 | """Contains the training of the normalizing flow model."""
# If deterministic CUDA is activated, some calculations cannot be calculated in parallel on the GPU.
# The training will take much longer but is reproducible.
DETERMINISTIC_CUDA = False
DATASET_PATH = Path.home() / "Downloads" / "voraus-ad-dataset-100hz.parquet"
MODEL_PATH: Optional[Path] = Path.cwd() / "model.pth"
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Define the training configuration and hyperparameters of the model.
| """Contains the training of the normalizing flow model."""
# If deterministic CUDA is activated, some calculations cannot be calculated in parallel on the GPU.
# The training will take much longer but is reproducible.
DETERMINISTIC_CUDA = False
DATASET_PATH = Path.home() / "Downloads" / "voraus-ad-dataset-100hz.parquet"
MODEL_PATH: Optional[Path] = Path.cwd() / "model.pth"
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Define the training configuration and hyperparameters of the model. | configuration = Configuration( | 0 | 2023-10-18 15:09:24+00:00 | 8k |
invictus717/UniDG | domainbed/scripts/sweep.py | [
{
"identifier": "datasets",
"path": "domainbed/datasets.py",
"snippet": "DATASETS = [\n # Debug\n \"Debug28\",\n \"Debug224\",\n # Small images\n \"ColoredMNIST\",\n \"RotatedMNIST\",\n # Big images\n \"VLCS\",\n \"PACS\",\n \"OfficeHome\",\n \"TerraIncognita\",\n \"DomainNet\",\n \"SVIRO\",\n # WILDS datasets\n \"WILDSCamelyon\",\n \"WILDSFMoW\"\n]\n N_STEPS = 5001 # Default, subclasses may override\n CHECKPOINT_FREQ = 100 # Default, subclasses may override\n N_WORKERS = 4 # Default, subclasses may override\n ENVIRONMENTS = None # Subclasses should override\n INPUT_SHAPE = None # Subclasses should override\n INPUT_SHAPE = (3, 28, 28)\n ENVIRONMENTS = ['0', '1', '2']\n INPUT_SHAPE = (3, 224, 224)\n ENVIRONMENTS = ['0', '1', '2']\n ENVIRONMENTS = ['+90%', '+80%', '-90%']\n ENVIRONMENTS = ['0', '15', '30', '45', '60', '75']\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"C\", \"L\", \"S\", \"V\"]\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"A\", \"C\", \"P\", \"S\"]\n CHECKPOINT_FREQ = 1000\n ENVIRONMENTS = [\"clip\", \"info\", \"paint\", \"quick\", \"real\", \"sketch\"]\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"A\", \"C\", \"P\", \"R\"]\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"L100\", \"L38\", \"L43\", \"L46\"]\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"aclass\", \"escape\", \"hilux\", \"i3\", \"lexus\", \"tesla\", \"tiguan\", \"tucson\", \"x5\", \"zoe\"]\n INPUT_SHAPE = (3, 224, 224)\n ENVIRONMENTS = [ \"hospital_0\", \"hospital_1\", \"hospital_2\", \"hospital_3\",\n \"hospital_4\"]\n ENVIRONMENTS = [ \"region_0\", \"region_1\", \"region_2\", \"region_3\",\n \"region_4\", \"region_5\"]\nclass MyDataParallel(torch.nn.DataParallel):\nclass MultipleDomainDataset:\nclass Debug(MultipleDomainDataset):\nclass Debug28(Debug):\nclass Debug224(Debug):\nclass MultipleEnvironmentMNIST(MultipleDomainDataset):\nclass ColoredMNIST(MultipleEnvironmentMNIST):\nclass RotatedMNIST(MultipleEnvironmentMNIST):\nclass MultipleEnvironmentImageFolder(MultipleDomainDataset):\nclass VLCS(MultipleEnvironmentImageFolder):\nclass PACS(MultipleEnvironmentImageFolder):\nclass DomainNet(MultipleEnvironmentImageFolder):\nclass OfficeHome(MultipleEnvironmentImageFolder):\nclass TerraIncognita(MultipleEnvironmentImageFolder):\nclass SVIRO(MultipleEnvironmentImageFolder):\nclass WILDSEnvironment:\nclass WILDSDataset(MultipleDomainDataset):\nclass WILDSCamelyon(WILDSDataset):\nclass WILDSFMoW(WILDSDataset):\n def __getattr__(self, name):\ndef get_dataset_class(dataset_name):\ndef num_environments(dataset_name):\n def __getitem__(self, index):\n def __len__(self):\n def __init__(self, root, test_envs, hparams):\n def __init__(self, root, environments, dataset_transform, input_shape,\n num_classes):\n def __init__(self, root, test_envs, hparams):\n def color_dataset(self, images, labels, environment):\n def torch_bernoulli_(self, p, size):\n def torch_xor_(self, a, b):\n def __init__(self, root, test_envs, hparams):\n def rotate_dataset(self, images, labels, angle):\n def __init__(self, root, test_envs, augment, hparams):\n def __init__(self, root, test_envs, hparams):\n def __init__(self, root, test_envs, hparams):\n def __init__(self, root, test_envs, hparams):\n def __init__(self, root, test_envs, hparams):\n def __init__(self, root, test_envs, hparams):\n def __init__(self, root, test_envs, hparams):\n def __init__(\n self,\n wilds_dataset,\n metadata_name,\n metadata_value,\n transform=None):\n def __getitem__(self, i):\n def __len__(self):\n def __init__(self, dataset, metadata_name, test_envs, augment, hparams):\n def metadata_values(self, wilds_dataset, metadata_name):\n def __init__(self, root, test_envs, hparams):\n def __init__(self, root, test_envs, hparams):"
},
{
"identifier": "hparams_registry",
"path": "domainbed/hparams_registry.py",
"snippet": "def _define_hparam(hparams, hparam_name, default_val, random_val_fn):\ndef _hparams(algorithm, dataset, random_seed):\n def _hparam(name, default_val, random_val_fn):\ndef default_hparams(algorithm, dataset):\ndef random_hparams(algorithm, dataset, seed):\n SMALL_IMAGES = ['Debug28', 'RotatedMNIST', 'ColoredMNIST']"
},
{
"identifier": "algorithms",
"path": "domainbed/algorithms.py",
"snippet": "ALGORITHMS = [\n 'ERM',\n 'IRM',\n 'GroupDRO',\n 'Mixup',\n 'MLDG',\n 'CORAL',\n 'MMD',\n 'DANN',\n 'CDANN',\n 'MTL',\n 'SagNet',\n 'ARM',\n 'VREx',\n 'RSC',\n 'SD',\n 'MIRO'\n]\n D = self.my_cdist(x, y)\n K = torch.zeros_like(D)\ndef get_algorithm_class(algorithm_name):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def predict(self, x):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def predict(self, x):\n def forward(self, x):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def predict(self, x):\n def __init__(self, input_shape, num_classes, num_domains,\n hparams, conditional, class_balance):\n def update(self, minibatches, unlabeled=None):\n def predict(self, x):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def _irm_penalty(logits, y):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams, gaussian):\n def my_cdist(self, x1, x2):\n def gaussian_kernel(self, x, y, gamma=[0.001, 0.01, 0.1, 1, 10, 100,\n 1000]):\n def mmd(self, x, y):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def update_embeddings_(self, features, env=None):\n def predict(self, x, env=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def opt(p):\n def forward_c(self, x):\n def forward_s(self, x):\n def randomize(self, x, what=\"style\", eps=1e-5):\n def update(self, minibatches, unlabeled=None):\n def predict(self, x):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, network):\n def forward(self, x):\n def predict(self, x):\n def __init__(self, shape):\n def forward(self, x):\n def __init__(self, shape, init=0.1, channelwise=True, eps=1e-5):\n def forward(self, x):\ndef get_shapes(model, input_shape):\n def __init__(self, input_shape, num_classes, num_domains, hparams, **kwargs):\n def update(self, x, y, **kwargs):\n def predict(self, x):\n def get_forward_model(self):\nclass Algorithm(torch.nn.Module):\nclass ERM(Algorithm):\nclass ARM(ERM):\nclass AbstractDANN(Algorithm):\nclass DANN(AbstractDANN):\nclass CDANN(AbstractDANN):\nclass IRM(ERM):\nclass VREx(ERM):\nclass Mixup(ERM):\nclass GroupDRO(ERM):\nclass MLDG(ERM):\nclass AbstractMMD(ERM):\nclass MMD(AbstractMMD):\nclass CORAL(AbstractMMD):\nclass MTL(Algorithm):\nclass SagNet(Algorithm):\nclass RSC(ERM):\nclass SD(ERM):\nclass ForwardModel(nn.Module):\nclass MeanEncoder(nn.Module):\nclass VarianceEncoder(nn.Module):\nclass MIRO(Algorithm):"
},
{
"identifier": "misc",
"path": "domainbed/lib/misc.py",
"snippet": "def make_weights_for_balanced_classes(dataset):\ndef pdb():\ndef seed_hash(*args):\ndef print_separator():\ndef print_row(row, colwidth=10, latex=False):\n def format_val(x):\n def __init__(self, underlying_dataset, keys):\n def __getitem__(self, key):\n def __len__(self):\ndef split_dataset(dataset, n, seed=0):\ndef random_pairs_of_minibatches(minibatches):\ndef accuracy(network, loader, weights, device):\ndef softmax_entropy(x: torch.Tensor) -> torch.Tensor:\ndef accuracy_ent(network, loader, weights, device, adapt=False):\n def __init__(self, fname, mode=\"a\"):\n def write(self, message):\n def flush(self):\nclass _SplitDataset(torch.utils.data.Dataset):\nclass Tee:"
},
{
"identifier": "command_launchers",
"path": "domainbed/command_launchers.py",
"snippet": "def local_launcher(commands):\ndef dummy_launcher(commands):\ndef multi_gpu_launcher(commands):\nREGISTRY = {\n 'local': local_launcher,\n 'dummy': dummy_launcher,\n 'multi_gpu': multi_gpu_launcher\n}"
}
] | import argparse
import copy
import getpass
import hashlib
import json
import os
import random
import shutil
import time
import uuid
import numpy as np
import torch
import tqdm
import shlex
from domainbed import datasets
from domainbed import hparams_registry
from domainbed import algorithms
from domainbed.lib import misc
from domainbed import command_launchers | 4,431 | '--input_dir', self.train_args['output_dir'],
'--ft_mode', ft_mode
]
self.command_str = ' '.join(command)
if os.path.exists(os.path.join(self.output_dir, 'done')):
if os.path.exists(os.path.join(self.output_dir, 'done_{}'.format(ft_mode))):
self.state = SAJob.DONE
else:
self.state = SAJob.PRETRAINED
elif os.path.exists(os.path.join(self.output_dir, 'results_{}.jsonl'.format(ft_mode))):
self.state = SAJob.INCOMPLETE
else:
self.state = SAJob.NOT_LAUNCHED
def __str__(self):
job_info = (self.train_args['dataset'],
self.train_args['algorithm'],
self.train_args['test_envs'],
self.train_args['hparams_seed'], self.ft_mode)
return '{}: {} {}'.format(
self.state,
self.output_dir,
job_info)
@staticmethod
def launch(jobs, launcher_fn):
print('Launching...')
jobs = jobs.copy()
np.random.shuffle(jobs)
print('Making job directories:')
for job in tqdm.tqdm(jobs, leave=False):
os.makedirs(job.output_dir, exist_ok=True)
commands = [job.command_str for job in jobs]
launcher_fn(commands)
print(f'Launched {len(jobs)} jobs!')
print('Launching...')
jobs = jobs.copy()
np.random.shuffle(jobs)
print('Making job directories:')
for job in tqdm.tqdm(jobs, leave=False):
os.makedirs(job.output_dir, exist_ok=True)
commands = [job.command_str for job in jobs]
launcher_fn(commands)
print(f'Launched {len(jobs)} jobs!')
class UAJob:
NOT_LAUNCHED = 'Not launched'
INCOMPLETE = 'Incomplete'
PRETRAINED = 'Pretrained'
DONE = 'Done'
def __init__(self, train_args, sweep_output_dir, adapt_algorithm):
args_str = json.dumps(train_args, sort_keys=True)
args_hash = hashlib.md5(args_str.encode('utf-8')).hexdigest()
self.output_dir = os.path.join(sweep_output_dir, args_hash)
self.adapt_algorithm = adapt_algorithm
self.train_args = copy.deepcopy(train_args)
self.train_args['output_dir'] = self.output_dir
command = [
'python', '-m', 'domainbed.scripts.unsupervised_adaptation',
'--input_dir', self.train_args['output_dir'],
'--adapt_algorithm', adapt_algorithm
]
self.command_str = ' '.join(command)
if os.path.exists(os.path.join(self.output_dir, 'done')):
if os.path.exists(os.path.join(self.output_dir, 'done_{}'.format(adapt_algorithm))):
self.state = UAJob.DONE
else:
self.state = UAJob.PRETRAINED
elif os.path.exists(os.path.join(self.output_dir, 'results_{}.jsonl'.format(adapt_algorithm))):
self.state = UAJob.INCOMPLETE
else:
self.state = UAJob.NOT_LAUNCHED
def __str__(self):
job_info = (self.train_args['dataset'],
self.train_args['algorithm'],
self.train_args['test_envs'],
self.train_args['hparams_seed'], self.adapt_algorithm)
return '{}: {} {}'.format(
self.state,
self.output_dir,
job_info)
@staticmethod
def launch(jobs, launcher_fn):
print('Launching...')
jobs = jobs.copy()
np.random.shuffle(jobs)
print('Making job directories:')
for job in tqdm.tqdm(jobs, leave=False):
os.makedirs(job.output_dir, exist_ok=True)
commands = [job.command_str for job in jobs]
launcher_fn(commands)
print(f'Launched {len(jobs)} jobs!')
print('Launching...')
jobs = jobs.copy()
np.random.shuffle(jobs)
print('Making job directories:')
for job in tqdm.tqdm(jobs, leave=False):
os.makedirs(job.output_dir, exist_ok=True)
commands = [job.command_str for job in jobs]
launcher_fn(commands)
print(f'Launched {len(jobs)} jobs!')
def all_test_env_combinations(n):
"""
For a dataset with n >= 3 envs, return all combinations of 1 and 2 test
envs.
"""
assert(n >= 3)
for i in range(n):
yield [i]
for j in range(i+1, n):
yield [i, j]
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Run sweeps
"""
class Job:
NOT_LAUNCHED = 'Not launched'
INCOMPLETE = 'Incomplete'
DONE = 'Done'
def __init__(self, train_args, sweep_output_dir):
args_str = json.dumps(train_args, sort_keys=True)
args_hash = hashlib.md5(args_str.encode('utf-8')).hexdigest()
self.output_dir = os.path.join(sweep_output_dir, args_hash)
self.train_args = copy.deepcopy(train_args)
self.train_args['output_dir'] = self.output_dir
command = ['OMP_NUM_THREADS=1', 'python', '-m', 'domainbed.scripts.train']
for k, v in sorted(self.train_args.items()):
if isinstance(v, list):
v = ' '.join([str(v_) for v_ in v])
elif isinstance(v, str):
v = shlex.quote(v)
command.append(f'--{k} {v}')
self.command_str = ' '.join(command)
if os.path.exists(os.path.join(self.output_dir, 'done')):
self.state = Job.DONE
elif os.path.exists(self.output_dir):
self.state = Job.INCOMPLETE
else:
self.state = Job.NOT_LAUNCHED
def __str__(self):
job_info = (self.train_args['dataset'],
self.train_args['algorithm'],
self.train_args['test_envs'],
self.train_args['hparams_seed'])
return '{}: {} {}'.format(
self.state,
self.output_dir,
job_info)
@staticmethod
def launch(jobs, launcher_fn):
print('Launching...')
jobs = jobs.copy()
np.random.shuffle(jobs)
print('Making job directories:')
for job in tqdm.tqdm(jobs, leave=False):
os.makedirs(job.output_dir, exist_ok=True)
commands = [job.command_str for job in jobs]
launcher_fn(commands)
print(f'Launched {len(jobs)} jobs!')
@staticmethod
def delete(jobs):
print('Deleting...')
for job in jobs:
shutil.rmtree(job.output_dir)
print(f'Deleted {len(jobs)} jobs!')
class SAJob:
NOT_LAUNCHED = 'Not launched'
INCOMPLETE = 'Incomplete'
PRETRAINED = 'Pretrained'
DONE = 'Done'
def __init__(self, train_args, sweep_output_dir, ft_mode):
args_str = json.dumps(train_args, sort_keys=True)
args_hash = hashlib.md5(args_str.encode('utf-8')).hexdigest()
self.output_dir = os.path.join(sweep_output_dir, args_hash)
self.ft_mode = ft_mode
self.train_args = copy.deepcopy(train_args)
self.train_args['output_dir'] = self.output_dir
command = [
'python', '-m', 'domainbed.scripts.supervised_adaptation',
'--input_dir', self.train_args['output_dir'],
'--ft_mode', ft_mode
]
self.command_str = ' '.join(command)
if os.path.exists(os.path.join(self.output_dir, 'done')):
if os.path.exists(os.path.join(self.output_dir, 'done_{}'.format(ft_mode))):
self.state = SAJob.DONE
else:
self.state = SAJob.PRETRAINED
elif os.path.exists(os.path.join(self.output_dir, 'results_{}.jsonl'.format(ft_mode))):
self.state = SAJob.INCOMPLETE
else:
self.state = SAJob.NOT_LAUNCHED
def __str__(self):
job_info = (self.train_args['dataset'],
self.train_args['algorithm'],
self.train_args['test_envs'],
self.train_args['hparams_seed'], self.ft_mode)
return '{}: {} {}'.format(
self.state,
self.output_dir,
job_info)
@staticmethod
def launch(jobs, launcher_fn):
print('Launching...')
jobs = jobs.copy()
np.random.shuffle(jobs)
print('Making job directories:')
for job in tqdm.tqdm(jobs, leave=False):
os.makedirs(job.output_dir, exist_ok=True)
commands = [job.command_str for job in jobs]
launcher_fn(commands)
print(f'Launched {len(jobs)} jobs!')
print('Launching...')
jobs = jobs.copy()
np.random.shuffle(jobs)
print('Making job directories:')
for job in tqdm.tqdm(jobs, leave=False):
os.makedirs(job.output_dir, exist_ok=True)
commands = [job.command_str for job in jobs]
launcher_fn(commands)
print(f'Launched {len(jobs)} jobs!')
class UAJob:
NOT_LAUNCHED = 'Not launched'
INCOMPLETE = 'Incomplete'
PRETRAINED = 'Pretrained'
DONE = 'Done'
def __init__(self, train_args, sweep_output_dir, adapt_algorithm):
args_str = json.dumps(train_args, sort_keys=True)
args_hash = hashlib.md5(args_str.encode('utf-8')).hexdigest()
self.output_dir = os.path.join(sweep_output_dir, args_hash)
self.adapt_algorithm = adapt_algorithm
self.train_args = copy.deepcopy(train_args)
self.train_args['output_dir'] = self.output_dir
command = [
'python', '-m', 'domainbed.scripts.unsupervised_adaptation',
'--input_dir', self.train_args['output_dir'],
'--adapt_algorithm', adapt_algorithm
]
self.command_str = ' '.join(command)
if os.path.exists(os.path.join(self.output_dir, 'done')):
if os.path.exists(os.path.join(self.output_dir, 'done_{}'.format(adapt_algorithm))):
self.state = UAJob.DONE
else:
self.state = UAJob.PRETRAINED
elif os.path.exists(os.path.join(self.output_dir, 'results_{}.jsonl'.format(adapt_algorithm))):
self.state = UAJob.INCOMPLETE
else:
self.state = UAJob.NOT_LAUNCHED
def __str__(self):
job_info = (self.train_args['dataset'],
self.train_args['algorithm'],
self.train_args['test_envs'],
self.train_args['hparams_seed'], self.adapt_algorithm)
return '{}: {} {}'.format(
self.state,
self.output_dir,
job_info)
@staticmethod
def launch(jobs, launcher_fn):
print('Launching...')
jobs = jobs.copy()
np.random.shuffle(jobs)
print('Making job directories:')
for job in tqdm.tqdm(jobs, leave=False):
os.makedirs(job.output_dir, exist_ok=True)
commands = [job.command_str for job in jobs]
launcher_fn(commands)
print(f'Launched {len(jobs)} jobs!')
print('Launching...')
jobs = jobs.copy()
np.random.shuffle(jobs)
print('Making job directories:')
for job in tqdm.tqdm(jobs, leave=False):
os.makedirs(job.output_dir, exist_ok=True)
commands = [job.command_str for job in jobs]
launcher_fn(commands)
print(f'Launched {len(jobs)} jobs!')
def all_test_env_combinations(n):
"""
For a dataset with n >= 3 envs, return all combinations of 1 and 2 test
envs.
"""
assert(n >= 3)
for i in range(n):
yield [i]
for j in range(i+1, n):
yield [i, j]
| def make_args_list(n_trials_from, n_trials, dataset_names, algorithms, n_hparams_from, n_hparams, steps, | 2 | 2023-10-15 14:26:12+00:00 | 8k |
AI-Application-and-Integration-Lab/DGUA_FAS | experiment/m/train.py | [
{
"identifier": "save_checkpoint",
"path": "util/utils.py",
"snippet": "def save_checkpoint(save_list, is_best, model, gpus, checkpoint_path, best_model_path, filename='_checkpoint.pth.tar'):\n epoch = save_list[0]\n valid_args = save_list[1]\n best_model_HTER = round(save_list[2], 5)\n best_model_ACC = save_list[3]\n best_model_ACER = save_list[4]\n threshold = save_list[5]\n if(len(gpus) > 1):\n old_state_dict = model.state_dict()\n from collections import OrderedDict\n new_state_dict = OrderedDict()\n for k, v in old_state_dict.items():\n flag = k.find('.module.')\n if (flag != -1):\n k = k.replace('.module.', '.')\n new_state_dict[k] = v\n state = {\n \"epoch\": epoch,\n \"state_dict\": new_state_dict,\n \"valid_arg\": valid_args,\n \"best_model_EER\": best_model_HTER,\n \"best_model_ACER\": best_model_ACER,\n \"best_model_ACC\": best_model_ACC,\n \"threshold\": threshold\n }\n else:\n state = {\n \"epoch\": epoch,\n \"state_dict\": model.state_dict(),\n \"valid_arg\": valid_args,\n \"best_model_EER\": best_model_HTER,\n \"best_model_ACER\": best_model_ACER,\n \"best_model_ACC\": best_model_ACC,\n \"threshold\": threshold\n }\n filepath = checkpoint_path + filename\n torch.save(state, filepath)\n # just save best model\n if is_best:\n # shutil.copy(filepath, best_model_path + 'model_best_' + str(best_model_HTER) + '_' + str(epoch) + '.pth.tar')\n shutil.copy(filepath, best_model_path + 'best_model.pth.tar')"
},
{
"identifier": "AverageMeter",
"path": "util/utils.py",
"snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count"
},
{
"identifier": "Logger",
"path": "util/utils.py",
"snippet": "class Logger(object):\n def __init__(self):\n self.terminal = sys.stdout\n self.file = None\n\n def open(self, file, mode=None):\n if mode is None:\n mode = 'w'\n self.file = open(file, mode)\n def write(self, message, is_terminal=1, is_file=1):\n if '\\r' in message:\n is_file = 0\n if is_terminal == 1:\n self.terminal.write(message)\n self.terminal.flush()\n if is_file == 1:\n self.file.write(message)\n self.file.flush()\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n # this handles the flush command by doing nothing.\n # you might want to specify some extra behavior here.\n pass"
},
{
"identifier": "accuracy",
"path": "util/utils.py",
"snippet": "def accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res"
},
{
"identifier": "mkdirs",
"path": "util/utils.py",
"snippet": "def mkdirs(checkpoint_path, best_model_path, logs):\n if not os.path.exists(checkpoint_path):\n os.makedirs(checkpoint_path)\n if not os.path.exists(best_model_path):\n os.makedirs(best_model_path)\n if not os.path.exists(logs):\n os.mkdir(logs)"
},
{
"identifier": "time_to_str",
"path": "util/utils.py",
"snippet": "def time_to_str(t, mode='min'):\n if mode=='min':\n t = int(t)/60\n hr = t//60\n min = t%60\n return '%2d hr %02d min'%(hr,min)\n elif mode=='sec':\n t = int(t)\n min = t//60\n sec = t%60\n return '%2d min %02d sec'%(min,sec)\n else:\n raise NotImplementedError"
},
{
"identifier": "eval",
"path": "util/evaluate.py",
"snippet": "def eval(valid_dataloader, model):\n criterion = nn.CrossEntropyLoss()\n valid_losses = AverageMeter()\n valid_top1 = AverageMeter()\n prob_dict = {}\n label_dict = {}\n model.eval()\n output_dict_tmp = {}\n target_dict_tmp = {}\n \n with torch.no_grad():\n for iter, (input, target, videoID) in enumerate(valid_dataloader):\n input = Variable(input).cuda()\n target = Variable(torch.from_numpy(np.array(target)).long()).cuda()\n cls_out = model(input)\n prob = F.softmax(cls_out, dim=1).cpu().data.numpy()[:, 1]\n label = target.cpu().data.numpy()\n videoID = videoID.cpu().data.numpy()\n for i in range(len(prob)):\n if(videoID[i] in prob_dict.keys()):\n prob_dict[videoID[i]].append(prob[i])\n label_dict[videoID[i]].append(label[i])\n output_dict_tmp[videoID[i]].append(cls_out[i].view(1, 3))\n target_dict_tmp[videoID[i]].append(target[i].view(1))\n else:\n prob_dict[videoID[i]] = []\n label_dict[videoID[i]] = []\n prob_dict[videoID[i]].append(prob[i])\n label_dict[videoID[i]].append(label[i])\n output_dict_tmp[videoID[i]] = []\n target_dict_tmp[videoID[i]] = []\n output_dict_tmp[videoID[i]].append(cls_out[i].view(1, 3))\n target_dict_tmp[videoID[i]].append(target[i].view(1))\n prob_list = []\n label_list = []\n na = []\n for key in prob_dict.keys():\n avg_single_video_prob = sum(prob_dict[key]) / len(prob_dict[key])\n avg_single_video_label = sum(label_dict[key]) / len(label_dict[key])\n prob_list = np.append(prob_list, avg_single_video_prob)\n label_list = np.append(label_list, avg_single_video_label)\n # compute loss and acc for every video\n avg_single_video_output = sum(output_dict_tmp[key]) / len(output_dict_tmp[key])\n avg_single_video_target = (sum(target_dict_tmp[key]) / len(target_dict_tmp[key])).long()\n loss = criterion(avg_single_video_output, torch.where(avg_single_video_target == 1, 1, 0))\n acc_valid = accuracy(avg_single_video_output, torch.where(avg_single_video_target == 1, 1, 0), topk=(1,))\n # loss = criterion(avg_single_video_output, torch.where(avg_single_video_target == 1, 1, 0))\n # acc_valid = accuracy(avg_single_video_output, torch.where(avg_single_video_target == 1, 1, 0), topk=(1,))\n valid_losses.update(loss.item())\n valid_top1.update(acc_valid[0])\n\n if avg_single_video_label == 2:\n na += [avg_single_video_prob]\n\n label_list = np.where(np.array(label_list) == 1, np.ones_like(label_list), np.zeros_like(label_list))\n auc_score = roc_auc_score(label_list, prob_list)\n cur_EER_valid, threshold, _, _ = get_EER_states(prob_list, label_list)\n ACC_threshold = calculate_threshold(prob_list, label_list, threshold)\n cur_HTER_valid = get_HTER_at_thr(prob_list, label_list, threshold)\n\n na_acc = torch.mean((torch.tensor(na) < threshold).type(torch.float)).item()\n return [valid_losses.avg, valid_top1.avg, cur_EER_valid, cur_HTER_valid, auc_score, threshold, ACC_threshold*100, na_acc]"
},
{
"identifier": "get_dataset",
"path": "util/get_loader.py",
"snippet": "def get_dataset(src1_data, src1_train_num_frames, src2_data, src2_train_num_frames, src3_data, src3_train_num_frames,\n tgt1_data, tgt_test_num_frames, batch_size):\n print('Load Source Data')\n print('Source Data: ', src1_data)\n src1_train_data_fake = sample_frames(flag=0, num_frames=src1_train_num_frames, dataset_name=src1_data)\n src1_train_data_real = sample_frames(flag=1, num_frames=src1_train_num_frames, dataset_name=src1_data)\n print('Source Data: ', src2_data)\n src2_train_data_fake = sample_frames(flag=0, num_frames=src2_train_num_frames, dataset_name=src2_data)\n src2_train_data_real = sample_frames(flag=1, num_frames=src2_train_num_frames, dataset_name=src2_data)\n print('Source Data: ', src3_data)\n src3_train_data_fake = sample_frames(flag=0, num_frames=src3_train_num_frames, dataset_name=src3_data)\n src3_train_data_real = sample_frames(flag=1, num_frames=src3_train_num_frames, dataset_name=src3_data)\n \n \n print('Load Target Data')\n print('Target Data: ', tgt1_data)\n tgt_test_data = sample_frames(flag=2, num_frames=tgt_test_num_frames, dataset_name=tgt1_data)\n \n\n src1_train_dataloader_fake = DataLoader(YunpeiDataset(src1_train_data_fake, train=True),\n batch_size=batch_size, shuffle=True)\n src1_train_dataloader_real = DataLoader(YunpeiDataset(src1_train_data_real, train=True),\n batch_size=batch_size, shuffle=True)\n src2_train_dataloader_fake = DataLoader(YunpeiDataset(src2_train_data_fake, train=True),\n batch_size=batch_size, shuffle=True)\n src2_train_dataloader_real = DataLoader(YunpeiDataset(src2_train_data_real, train=True),\n batch_size=batch_size, shuffle=True)\n src3_train_dataloader_fake = DataLoader(YunpeiDataset(src3_train_data_fake, train=True),\n batch_size=batch_size, shuffle=True)\n src3_train_dataloader_real = DataLoader(YunpeiDataset(src3_train_data_real, train=True),\n batch_size=batch_size, shuffle=True)\n \n \n tgt_dataloader = DataLoader(YunpeiDataset(tgt_test_data, train=False), batch_size=batch_size, shuffle=False)\n return src1_train_dataloader_fake, src1_train_dataloader_real, \\\n src2_train_dataloader_fake, src2_train_dataloader_real, \\\n src3_train_dataloader_fake, src3_train_dataloader_real, \\\n tgt_dataloader"
},
{
"identifier": "get_training_arguments",
"path": "option.py",
"snippet": "def get_training_arguments(parse_args=True, config_path=None):\n parser = argparse.ArgumentParser()\n parser = arguments_nn_layers(parser=parser)\n parser = arguments_model(parser=parser)\n parser.add_argument('--common.config-file', type=str, default='./../../configs/mobilevit_xs.yaml')\n parser.add_argument('--dataset.category', type=str, default='classification')\n if parse_args:\n if config_path:\n opts = parser.parse_args(['--common.config-file', config_path])\n else:\n opts = parser.parse_args()\n opts = load_config_file(opts)\n return opts\n else:\n return parser"
}
] | import sys
import random
import numpy as np
import time
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.functional as F
from util.utils import save_checkpoint, AverageMeter, Logger, accuracy, mkdirs, time_to_str
from util.evaluate import eval
from util.get_loader import get_dataset
from config import config
from datetime import datetime
from timeit import default_timer as timer
from torch.utils.tensorboard import SummaryWriter
from cvnets.models import get_model
from option import get_training_arguments | 5,495 | if (iter_num % src1_iter_per_epoch_fake == 0):
src1_train_iter_fake = iter(src1_train_dataloader_fake)
if (iter_num % src2_iter_per_epoch_fake == 0):
src2_train_iter_fake = iter(src2_train_dataloader_fake)
if (iter_num % src3_iter_per_epoch_fake == 0):
src3_train_iter_fake = iter(src3_train_dataloader_fake)
if (iter_num != 0 and iter_num % iter_per_epoch == 0):
epoch = epoch + 1
param_lr_tmp = []
for param_group in optimizer.param_groups:
param_lr_tmp.append(param_group["lr"])
net.train(True)
optimizer.zero_grad()
######### data prepare #########
src1_img_real, src1_label_real = src1_train_iter_real.next()
src1_img_real = src1_img_real.cuda()
src1_label_real = src1_label_real.cuda()
src2_img_real, src2_label_real = src2_train_iter_real.next()
src2_img_real = src2_img_real.cuda()
src2_label_real = src2_label_real.cuda()
src3_img_real, src3_label_real = src3_train_iter_real.next()
src3_img_real = src3_img_real.cuda()
src3_label_real = src3_label_real.cuda()
src1_img_fake, src1_label_fake = src1_train_iter_fake.next()
src1_img_fake = src1_img_fake.cuda()
src1_label_fake = src1_label_fake.cuda()
src2_img_fake, src2_label_fake = src2_train_iter_fake.next()
src2_img_fake = src2_img_fake.cuda()
src2_label_fake = src2_label_fake.cuda()
src3_img_fake, src3_label_fake = src3_train_iter_fake.next()
src3_img_fake = src3_img_fake.cuda()
src3_label_fake = src3_label_fake.cuda()
input_data = torch.cat([src1_img_real, src1_img_fake, src2_img_real, src2_img_fake, src3_img_real, src3_img_fake], dim=0)
source_label = torch.cat([src1_label_real, src1_label_fake,
src2_label_real, src2_label_fake,
src3_label_real, src3_label_fake,
], dim=0)
######### forward #########
######### Copycat train #########
bsz = source_label.size(0)
net.train(False)
net2.train(True) # Copycat Model
optimizer2.zero_grad()
classifier_label_out, x11, x12, x13 = net(input_data, return_feature=True)
classifier_label_out2, x21, x22, x23 = net2(input_data, return_feature=True)
pullloss1 = criterion["l1"](x11.reshape(bsz, -1),x21.reshape(bsz,-1))
pullloss2 = criterion["l1"](x12.reshape(bsz, -1),x22.reshape(bsz,-1))
cls_loss = criterion["softmax"](classifier_label_out2.narrow(0, 0, input_data.size(0)), source_label)
pullloss = (pullloss1 + pullloss2) / 2
cls_loss = cls_loss + pullloss
cls_loss.backward()
optimizer2.step()
######## MainModel train ########
net.train(True)
net2.train(False) # Copycat Model
optimizer.zero_grad()
classifier_label_out, x11, x12, x13 = net(input_data, return_feature=True)
classifier_label_out2, x21, x22, x23 = net2(input_data, return_feature=True)
out21 = net(input_data, x1 = x21)
out22 = net(input_data, x2 = x22)
out23 = net(input_data, x3 = x23)
klu0 = criterion["lsr_hard"](out21, source_label)
klu1 = criterion["lsr_hard"](out22, source_label)
klu2 = criterion["lsr_easy"](out23, source_label)
klu = (klu0 + klu1 + klu2) / 3
# features_dim = 20*640*8*8
real_features = net.extract_features(input_data[source_label == 1])
l1_loss = criterion["l1"](real_features, torch.zeros_like(real_features))
######### cross-entropy loss #########
cls_loss = criterion["softmax"](classifier_label_out.narrow(0, 0, input_data.size(0)), source_label)
######### backward #########
total_loss = cls_loss + l1_loss + 0.1 * klu
total_loss.backward()
optimizer.step()
optimizer.zero_grad()
loss_classifier.update(cls_loss.item())
acc = accuracy(classifier_label_out.narrow(0, 0, input_data.size(0)), source_label, topk=(1,))
classifer_top1.update(acc[0])
print('\r', end='', flush=True)
print(
' %4.1f | %5.3f %6.3f %6.3f %6.3f | %6.3f %6.3f | %6.3f %6.3f %6.3f | %s'
% (
(iter_num+1) / iter_per_epoch,
valid_args[0], valid_args[1], valid_args[3] * 100, valid_args[4] * 100,
loss_classifier.avg, classifer_top1.avg,
float(best_model_ACC), float(best_model_HTER * 100), float(best_model_AUC * 100),
time_to_str(timer() - start, 'min'))
, end='', flush=True)
if (iter_num != 0 and (iter_num+1) % iter_per_epoch == 0):
train_loss = loss_classifier.avg
train_acc = classifer_top1.avg
# 0:loss, 1:top-1, 2:EER, 3:HTER, 4:AUC, 5:threshold, 6:ACC_threshold
| sys.path.append('../../')
class SmoothCrossEntropy(nn.Module):
def __init__(self, alpha=0.5):
super(SmoothCrossEntropy, self).__init__()
self.alpha = alpha
def forward(self, logits, labels):
num_classes = logits.shape[-1]
alpha_div_k = self.alpha / num_classes
target_probs = nn.functional.one_hot(labels, num_classes=num_classes).float() * \
(1. - self.alpha) + alpha_div_k
loss = -(target_probs * torch.log_softmax(logits, dim=-1)).sum(dim=-1)
return loss.mean()
random.seed(config.seed)
np.random.seed(config.seed)
torch.manual_seed(config.seed)
torch.cuda.manual_seed_all(config.seed)
torch.cuda.manual_seed(config.seed)
os.environ["CUDA_VISIBLE_DEVICES"] = config.gpus
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
device = 'cuda'
def train():
mkdirs(config.checkpoint_path, config.best_model_path, config.logs)
# load data
src1_train_dataloader_fake, src1_train_dataloader_real, \
src2_train_dataloader_fake, src2_train_dataloader_real, \
src3_train_dataloader_fake, src3_train_dataloader_real, \
tgt_valid_dataloader = get_dataset(config.src1_data, config.src1_train_num_frames,
config.src2_data, config.src2_train_num_frames,
config.src3_data, config.src3_train_num_frames,
config.tgt_data, config.tgt_test_num_frames, config.batch_size)
best_model_ACC = 0.0
best_model_HTER = 1.0
best_model_ACER = 1.0
best_model_AUC = 0.0
# 0:loss, 1:top-1, 2:EER, 3:HTER, 4:ACER, 5:AUC, 6:threshold
valid_args = [np.inf, 0, 0, 0, 0, 0, 0, 0]
loss_classifier = AverageMeter()
classifer_top1 = AverageMeter()
opts = get_training_arguments(config_path='./../../configs/mobilevit_s.yaml')
net = get_model(opts).to(device)
net2 = get_model(opts).to(device)
state_dict = torch.load('./../../pretrained_model/mobilevit_s.pt')
del state_dict['classifier.fc.weight']
del state_dict['classifier.fc.bias']
net.load_state_dict(state_dict, strict=False)
net2.load_state_dict(state_dict, strict=False)
writer = SummaryWriter('./logs/runs')
log = Logger()
log.open(config.logs + config.tgt_data + '_log.txt', mode='a')
log.write("\n----------------------------------------------- [START %s] %s\n\n" % (
datetime.now().strftime('%Y-%m-%d %H:%M:%S'), '-' * 51))
log.write('** start training target model! **\n')
log.write(
'--------|------------- VALID -------------|--- classifier ---|------ Current Best ------|--------------|\n')
log.write(
' iter | loss top-1 HTER AUC | loss top-1 | top-1 HTER AUC | time |\n')
log.write(
'-------------------------------------------------------------------------------------------------------|\n')
start = timer()
criterion = {
'softmax': nn.CrossEntropyLoss(label_smoothing=0.1).cuda(),
'l1': nn.L1Loss().cuda(),
'lsr_hard' : SmoothCrossEntropy(0.5),
'lsr_easy' : SmoothCrossEntropy(1.0)
}
optimizer_dict = [
{"params": filter(lambda p: p.requires_grad, net.parameters()), "lr": config.init_lr},
]
optimizer_dict2 = [
{"params": filter(lambda p: p.requires_grad, net2.parameters()), "lr": config.init_lr},
]
optimizer = optim.Adam(optimizer_dict, lr=config.init_lr, weight_decay=config.weight_decay)
optimizer2 = optim.Adam(optimizer_dict2, lr=config.init_lr, weight_decay=config.weight_decay)
init_param_lr = []
for param_group in optimizer.param_groups:
init_param_lr.append(param_group["lr"])
iter_per_epoch = 10
src1_train_iter_real = iter(src1_train_dataloader_real)
src1_iter_per_epoch_real = len(src1_train_iter_real)
src2_train_iter_real = iter(src2_train_dataloader_real)
src2_iter_per_epoch_real = len(src2_train_iter_real)
src3_train_iter_real = iter(src3_train_dataloader_real)
src3_iter_per_epoch_real = len(src3_train_iter_real)
src1_train_iter_fake = iter(src1_train_dataloader_fake)
src1_iter_per_epoch_fake = len(src1_train_iter_fake)
src2_train_iter_fake = iter(src2_train_dataloader_fake)
src2_iter_per_epoch_fake = len(src2_train_iter_fake)
src3_train_iter_fake = iter(src3_train_dataloader_fake)
src3_iter_per_epoch_fake = len(src3_train_iter_fake)
max_iter = config.max_iter
epoch = 1
if(len(config.gpus) > 1):
net = torch.nn.DataParallel(net).cuda()
net2 = torch.nn.DataParallel(net).cuda()
for iter_num in range(max_iter+1):
if (iter_num % src1_iter_per_epoch_real == 0):
src1_train_iter_real = iter(src1_train_dataloader_real)
if (iter_num % src2_iter_per_epoch_real == 0):
src2_train_iter_real = iter(src2_train_dataloader_real)
if (iter_num % src3_iter_per_epoch_real == 0):
src3_train_iter_real = iter(src3_train_dataloader_real)
if (iter_num % src1_iter_per_epoch_fake == 0):
src1_train_iter_fake = iter(src1_train_dataloader_fake)
if (iter_num % src2_iter_per_epoch_fake == 0):
src2_train_iter_fake = iter(src2_train_dataloader_fake)
if (iter_num % src3_iter_per_epoch_fake == 0):
src3_train_iter_fake = iter(src3_train_dataloader_fake)
if (iter_num != 0 and iter_num % iter_per_epoch == 0):
epoch = epoch + 1
param_lr_tmp = []
for param_group in optimizer.param_groups:
param_lr_tmp.append(param_group["lr"])
net.train(True)
optimizer.zero_grad()
######### data prepare #########
src1_img_real, src1_label_real = src1_train_iter_real.next()
src1_img_real = src1_img_real.cuda()
src1_label_real = src1_label_real.cuda()
src2_img_real, src2_label_real = src2_train_iter_real.next()
src2_img_real = src2_img_real.cuda()
src2_label_real = src2_label_real.cuda()
src3_img_real, src3_label_real = src3_train_iter_real.next()
src3_img_real = src3_img_real.cuda()
src3_label_real = src3_label_real.cuda()
src1_img_fake, src1_label_fake = src1_train_iter_fake.next()
src1_img_fake = src1_img_fake.cuda()
src1_label_fake = src1_label_fake.cuda()
src2_img_fake, src2_label_fake = src2_train_iter_fake.next()
src2_img_fake = src2_img_fake.cuda()
src2_label_fake = src2_label_fake.cuda()
src3_img_fake, src3_label_fake = src3_train_iter_fake.next()
src3_img_fake = src3_img_fake.cuda()
src3_label_fake = src3_label_fake.cuda()
input_data = torch.cat([src1_img_real, src1_img_fake, src2_img_real, src2_img_fake, src3_img_real, src3_img_fake], dim=0)
source_label = torch.cat([src1_label_real, src1_label_fake,
src2_label_real, src2_label_fake,
src3_label_real, src3_label_fake,
], dim=0)
######### forward #########
######### Copycat train #########
bsz = source_label.size(0)
net.train(False)
net2.train(True) # Copycat Model
optimizer2.zero_grad()
classifier_label_out, x11, x12, x13 = net(input_data, return_feature=True)
classifier_label_out2, x21, x22, x23 = net2(input_data, return_feature=True)
pullloss1 = criterion["l1"](x11.reshape(bsz, -1),x21.reshape(bsz,-1))
pullloss2 = criterion["l1"](x12.reshape(bsz, -1),x22.reshape(bsz,-1))
cls_loss = criterion["softmax"](classifier_label_out2.narrow(0, 0, input_data.size(0)), source_label)
pullloss = (pullloss1 + pullloss2) / 2
cls_loss = cls_loss + pullloss
cls_loss.backward()
optimizer2.step()
######## MainModel train ########
net.train(True)
net2.train(False) # Copycat Model
optimizer.zero_grad()
classifier_label_out, x11, x12, x13 = net(input_data, return_feature=True)
classifier_label_out2, x21, x22, x23 = net2(input_data, return_feature=True)
out21 = net(input_data, x1 = x21)
out22 = net(input_data, x2 = x22)
out23 = net(input_data, x3 = x23)
klu0 = criterion["lsr_hard"](out21, source_label)
klu1 = criterion["lsr_hard"](out22, source_label)
klu2 = criterion["lsr_easy"](out23, source_label)
klu = (klu0 + klu1 + klu2) / 3
# features_dim = 20*640*8*8
real_features = net.extract_features(input_data[source_label == 1])
l1_loss = criterion["l1"](real_features, torch.zeros_like(real_features))
######### cross-entropy loss #########
cls_loss = criterion["softmax"](classifier_label_out.narrow(0, 0, input_data.size(0)), source_label)
######### backward #########
total_loss = cls_loss + l1_loss + 0.1 * klu
total_loss.backward()
optimizer.step()
optimizer.zero_grad()
loss_classifier.update(cls_loss.item())
acc = accuracy(classifier_label_out.narrow(0, 0, input_data.size(0)), source_label, topk=(1,))
classifer_top1.update(acc[0])
print('\r', end='', flush=True)
print(
' %4.1f | %5.3f %6.3f %6.3f %6.3f | %6.3f %6.3f | %6.3f %6.3f %6.3f | %s'
% (
(iter_num+1) / iter_per_epoch,
valid_args[0], valid_args[1], valid_args[3] * 100, valid_args[4] * 100,
loss_classifier.avg, classifer_top1.avg,
float(best_model_ACC), float(best_model_HTER * 100), float(best_model_AUC * 100),
time_to_str(timer() - start, 'min'))
, end='', flush=True)
if (iter_num != 0 and (iter_num+1) % iter_per_epoch == 0):
train_loss = loss_classifier.avg
train_acc = classifer_top1.avg
# 0:loss, 1:top-1, 2:EER, 3:HTER, 4:AUC, 5:threshold, 6:ACC_threshold | valid_args = eval(tgt_valid_dataloader, net) | 6 | 2023-10-17 15:35:33+00:00 | 8k |
jianlanluo/SAQ | vqn/vqiql_main.py | [
{
"identifier": "VQIQLLearner",
"path": "vqn/vqiql.py",
"snippet": "class VQIQLLearner(Agent):\n\n def __init__(self,\n seed: int,\n observations: jnp.ndarray,\n actions: jnp.ndarray,\n vqvae_lr: float = 3e-4,\n embedding_dim: int = 128,\n codebook_size: int = 64,\n commitment_cost: float = 1.0,\n quantization_cost: float = 1.0,\n entropy_loss_ratio: float = 0.0,\n entropy_loss_type: str = \"softmax\",\n entropy_temperature: float = 1.0,\n vqvae_arch: str = '512-512',\n action_only_quantization: bool = False,\n reconstruction_loss_type: str = 'l2',\n sample_action: bool = True,\n actor_lr: float = 3e-4,\n value_lr: float = 3e-4,\n critic_lr: float = 3e-4,\n policy_weight_decay: float = 0.0,\n qf_weight_decay: float = 0.0,\n decay_steps: Optional[int] = None,\n hidden_dims: Sequence[int] = (256, 256),\n discount: float = 0.99,\n tau: float = 0.005,\n expectile: float = 0.9,\n A_scaling: float = 10.0,\n critic_reduction: str = 'min',\n apply_tanh: bool = False,\n dropout_rate: Optional[float] = None,\n policy_arch='256-256',\n policy_log_std_multiplier=1.0,\n policy_log_std_offset=-1.0,\n behavior_policy_lr=3e-4,\n behavior_policy_weight_decay=0.0,\n kl_divergence_weight=1.0,):\n \"\"\"\n An implementation of the version of Soft-Actor-Critic described in https://arxiv.org/abs/1801.01290\n \"\"\"\n\n self.expectile = expectile\n self.tau = tau\n self.discount = discount\n self.critic_reduction = critic_reduction\n self.A_scaling = A_scaling\n self.kl_divergence_weight = kl_divergence_weight\n\n rng = jax.random.PRNGKey(seed)\n rng, actor_key, critic_key, value_key = jax.random.split(rng, 4)\n \n if qf_weight_decay != 0:\n critic_optimiser = optax.adamw(learning_rate=critic_lr, weight_decay=qf_weight_decay)\n else:\n critic_optimiser = optax.adam(learning_rate=critic_lr)\n\n critic_def = StateActionEnsemble(hidden_dims, num_qs=1, output_dims=codebook_size)\n critic_params = critic_def.init(critic_key, observations,\n actions)['params']\n critic = TrainState.create(apply_fn=critic_def.apply,\n params=critic_params,\n tx=critic_optimiser)\n target_critic_params = copy.deepcopy(critic_params)\n\n value_def = StateValue(hidden_dims)\n value_params = value_def.init(value_key, observations)['params']\n value = TrainState.create(apply_fn=value_def.apply,\n params=value_params,\n tx=optax.adam(learning_rate=value_lr))\n\n self._rng = rng\n self._critic = critic\n self._target_critic_params = target_critic_params\n self._value = value\n\n self.observation_dim = observations.shape[-1]\n self.action_dim = actions.shape[-1]\n\n self.vqvae = ActionVQVAE(\n observation_dim=self.observation_dim,\n action_dim=self.action_dim,\n embedding_dim=embedding_dim,\n codebook_size=codebook_size,\n commitment_cost=commitment_cost,\n quantization_cost=quantization_cost,\n entropy_loss_ratio=entropy_loss_ratio,\n entropy_loss_type=entropy_loss_type,\n entropy_temperature=entropy_temperature,\n arch=vqvae_arch,\n action_only_quantization=action_only_quantization,\n reconstruction_loss_type=reconstruction_loss_type,\n )\n self._vqvae_train_state = TrainState.create(\n params=self.vqvae.init(\n next_rng(self.vqvae.rng_keys()),\n jnp.zeros((1, self.observation_dim)),\n jnp.zeros((1, self.action_dim)),\n train=True\n ),\n tx=optax.adam(vqvae_lr),\n apply_fn=None,\n )\n\n self._vqvae_total_steps = 0\n self.sample_action = sample_action\n\n self.behavior_policy = FullyConnectedNetwork(\n codebook_size\n )\n behavior_policy_params = self.behavior_policy.init(\n next_rng(self.behavior_policy.rng_keys()),\n jnp.zeros((10, self.observation_dim))\n )\n self._behavior_policy_train_state = TrainState.create(\n params=behavior_policy_params,\n tx=optax.adamw(\n behavior_policy_lr, \n weight_decay=behavior_policy_weight_decay),\n apply_fn=None\n )\n self._behavior_policy_total_steps = 0\n\n self._sampler_policy = VQSamplerPolicy(\n self._critic, self.vqvae, self.behavior_policy,\n self._critic.params, self._vqvae_train_state.params, self._behavior_policy_train_state.params, \n sample_action, A_scaling, kl_divergence_weight\n )\n \n def train_behavior_policy(self, batch):\n self._behavior_policy_train_state, metrics = self._behavior_policy_train_step(\n next_rng(), self._behavior_policy_train_state, batch\n )\n self._behavior_policy_total_steps += 1\n return metrics\n\n @partial(jax.jit, static_argnames=('self', ))\n def _behavior_policy_train_step(self, rng, train_state, batch):\n observations = batch['observations']\n actions = batch['actions']\n rng_generator = JaxRNG(rng)\n\n @partial(jax.grad, has_aux=True)\n def grad_fn(train_param, rng):\n observations = batch['observations']\n actions = self.vqvae.apply(\n self._vqvae_train_state.params,\n observations,\n batch['actions'],\n method=self.vqvae.encode\n )\n\n @wrap_function_with_rng(rng_generator())\n def forward_behavior_policy(rng, *args, **kwargs):\n return self.behavior_policy.apply(\n *args, **kwargs,\n rngs=JaxRNG(rng)(self.behavior_policy.rng_keys())\n )\n \n q_values = forward_behavior_policy(train_param, observations)\n log_probs = jnp.mean(optax.softmax_cross_entropy_with_integer_labels(q_values, actions))\n policy_loss = log_probs\n\n return policy_loss, locals()\n grads, aux_values = grad_fn(train_state.params, rng)\n new_train_state = train_state.apply_gradients(grads=grads)\n metrics = collect_jax_metrics(\n aux_values,\n ['policy_loss', 'log_probs'],\n )\n return new_train_state, metrics\n \n\n @partial(jax.jit, static_argnames=('self', ))\n def _vqvae_train_step(self, rng, train_state, batch):\n observations = batch['observations']\n actions = batch['actions']\n rng_generator = JaxRNG(rng)\n\n @partial(jax.grad, has_aux=True)\n def grad_fn(train_params):\n reconstructed, result_dict = self.vqvae.apply(\n train_params,\n observations,\n actions,\n train=True,\n )\n return result_dict['loss'], result_dict\n\n grads, aux_values = grad_fn(train_state.params)\n new_train_state = train_state.apply_gradients(grads=grads)\n metrics = collect_jax_metrics(\n aux_values,\n ['loss', 'reconstruction_loss', 'quantizer_loss', 'e_latent_loss', 'q_latent_loss',\n 'entropy_loss', 'action_prior_loss', 'action_prior_accuracy'],\n )\n return new_train_state, metrics\n\n def train_vqvae(self, batch):\n self._vqvae_train_state, metrics = self._vqvae_train_step(\n next_rng(), self._vqvae_train_state, batch\n )\n self._vqvae_total_steps += 1\n return metrics\n\n def update(self, batch: FrozenDict) -> Dict[str, float]:\n new_rng, new_critic, new_target_critic, new_value, info = self._update_jit(\n self._rng, self._critic, self._target_critic_params, self._vqvae_train_state,\n self._value, batch, self.discount, self.tau, self.expectile,\n self.A_scaling, self.critic_reduction, self._behavior_policy_train_state)\n\n self._rng = new_rng\n self._critic = new_critic\n self._target_critic_params = new_target_critic\n self._value = new_value\n\n return info\n\n @partial(jax.jit, static_argnames=('self', 'critic_reduction'))\n def _update_jit(\n self, rng: PRNGKey, critic: TrainState,\n target_critic_params: Params, vqvae: TrainState, value: TrainState, batch: TrainState,\n discount: float, tau: float, expectile: float, A_scaling: float,\n critic_reduction: str, policy: TrainState\n ) -> Tuple[PRNGKey, TrainState, Params, TrainState, Dict[str,\n float]]:\n observations = batch['observations']\n original_actions = batch['actions']\n\n actions = self.vqvae.apply(\n vqvae.params,\n observations,\n original_actions,\n method=self.vqvae.encode\n )\n \n\n log_prob = self.behavior_policy.apply(self._behavior_policy_train_state.params, batch['observations'])\n\n target_critic = critic.replace(params=target_critic_params)\n new_value, value_info = update_v(target_critic, value, batch, actions, expectile,\n critic_reduction)\n key, rng = jax.random.split(rng)\n\n new_critic, critic_info = update_q(critic, new_value, batch, actions, discount, log_prob, self.kl_divergence_weight)\n\n new_target_critic_params = soft_target_update(new_critic.params,\n target_critic_params, tau)\n\n return rng, new_critic, new_target_critic_params, new_value, {\n **critic_info,\n **value_info,\n }\n\n def get_sampler_policy(self):\n return self._sampler_policy.update_params(\n self._critic.params, self._vqvae_train_state.params, self._behavior_policy_train_state.params\n )"
},
{
"identifier": "IQLSamplerPolicy",
"path": "vqn/vqiql.py",
"snippet": "class IQLSamplerPolicy(object):\n\n def __init__(self, actor):\n self.actor=actor\n rng = jax.random.PRNGKey(24)\n rng, actor_key, critic_key, value_key = jax.random.split(rng, 4)\n self.rng = rng\n\n def __call__(self, observations, deterministic=False):\n actions = self.sample_actions(observations)\n\n assert jnp.all(jnp.isfinite(actions))\n return jax.device_get(actions)\n\n def sample_actions(self,\n observations: np.ndarray,\n temperature: float = 1.0) -> jnp.ndarray:\n rng, actions = sample_actions_jit(self.rng, self.actor.apply_fn,\n self.actor.params, observations)\n\n self.rng = rng\n\n actions = np.asarray(actions)\n return np.clip(actions, -1, 1)"
},
{
"identifier": "split_into_trajectories",
"path": "vqn/vqiql.py",
"snippet": "def split_into_trajectories(observations, actions, rewards, masks, dones_float,\n next_observations):\n trajs = [[]]\n\n for i in tqdm(range(len(observations))):\n trajs[-1].append((observations[i], actions[i], rewards[i], masks[i],\n dones_float[i], next_observations[i]))\n if dones_float[i] == 1.0 and i + 1 < len(observations):\n trajs.append([])\n\n return trajs"
},
{
"identifier": "get_d4rl_dataset",
"path": "vqn/replay_buffer.py",
"snippet": "def get_d4rl_dataset(env):\n dataset = d4rl.qlearning_dataset(env)\n return dict(\n observations=dataset['observations'],\n actions=dataset['actions'],\n next_observations=dataset['next_observations'],\n rewards=dataset['rewards'],\n dones=dataset['terminals'].astype(np.float32),\n )"
},
{
"identifier": "TrajSampler",
"path": "vqn/sampler.py",
"snippet": "class TrajSampler(object):\n\n def __init__(self, env, max_traj_length=1000):\n self.max_traj_length = max_traj_length\n self._env = env\n\n def sample(self, policy, n_trajs, replay_buffer=None, deterministic=False):\n trajs = []\n for _ in range(n_trajs):\n observations = []\n actions = []\n rewards = []\n next_observations = []\n dones = []\n\n observation = self.env.reset()\n\n for _ in range(self.max_traj_length):\n action = policy(observation.reshape(1, -1), deterministic=deterministic).reshape(-1)\n next_observation, reward, done, _ = self.env.step(action)\n observations.append(observation)\n actions.append(action)\n rewards.append(reward)\n dones.append(done)\n next_observations.append(next_observation)\n\n if replay_buffer is not None:\n replay_buffer.add_sample(\n observation, action, reward, next_observation, done\n )\n\n observation = next_observation\n\n if done:\n break\n\n trajs.append(dict(\n observations=np.array(observations, dtype=np.float32),\n actions=np.array(actions, dtype=np.float32),\n rewards=np.array(rewards, dtype=np.float32),\n next_observations=np.array(next_observations, dtype=np.float32),\n dones=np.array(dones, dtype=np.float32),\n ))\n\n return trajs\n\n @property\n def env(self):\n return self._env"
},
{
"identifier": "OfflineDataset",
"path": "vqn/robomimic_utils.py",
"snippet": "class OfflineDataset(Dataset):\n\n def __init__(self,\n dataset_dict: dict,\n clip_to_eps: bool = True,\n eps: float = 1e-5):\n\n if clip_to_eps:\n lim = 1 - eps\n dataset_dict['actions'] = np.clip(dataset_dict['actions'], -lim,\n lim)\n\n dones = np.full_like(dataset_dict['rewards'], False, dtype=bool)\n\n for i in range(len(dones) - 1):\n if np.linalg.norm(dataset_dict['observations'][i + 1] -\n dataset_dict['next_observations'][i]\n ) > 1e-6 or dataset_dict['dones'][i] == 1.0:\n dones[i] = True\n\n dones[-1] = True\n\n dataset_dict['masks'] = 1.0 - dataset_dict['dones']\n del dataset_dict['dones']\n\n for k, v in dataset_dict.items():\n dataset_dict[k] = v.astype(np.float32)\n\n dataset_dict['dones'] = dones\n\n super().__init__(dataset_dict)"
},
{
"identifier": "process_robomimic_dataset",
"path": "vqn/robomimic_utils.py",
"snippet": "def process_robomimic_dataset(seq_dataset):\n seq_dataset = seq_dataset.getitem_cache\n\n for i in range(len(seq_dataset)):\n seq_dataset[i]['obs'] = np.concatenate([seq_dataset[i]['obs'][key] \n for key in OBS_KEYS], axis=1)\n seq_dataset[i]['next_obs'] = np.concatenate([seq_dataset[i]['next_obs'][key] \n for key in OBS_KEYS], axis=1)\n\n dataset = {'actions': np.concatenate([seq_dataset[i]['actions'] for i in range(len(seq_dataset))]),\n 'rewards': np.concatenate([seq_dataset[i]['rewards'] for i in range(len(seq_dataset))]),\n 'terminals': np.concatenate([seq_dataset[i]['dones'] for i in range(len(seq_dataset))]),\n 'observations': np.concatenate([seq_dataset[i]['obs'] for i in range(len(seq_dataset))]),\n 'next_observations': np.concatenate([seq_dataset[i]['next_obs'] for i in range(len(seq_dataset))])}\n return dataset"
},
{
"identifier": "D4RLDataset",
"path": "vqn/robomimic_utils.py",
"snippet": "class D4RLDataset(Dataset):\n def __init__(self,\n env: gym.Env,\n clip_to_eps: bool = True,\n eps: float = 1e-5,\n ignore_done: bool = False,\n custom_dataset: dict = None):\n if custom_dataset:\n if env is not None:\n dataset = d4rl.qlearning_dataset(env, dataset=custom_dataset)\n else:\n dataset = custom_dataset\n print(\"Loaded custom dataset\")\n else:\n dataset = d4rl.qlearning_dataset(env)\n if clip_to_eps:\n lim = 1 - eps\n dataset['actions'] = np.clip(dataset['actions'], -lim, lim)\n dones_float = np.zeros_like(dataset['rewards'])\n for i in range(len(dones_float) - 1):\n if ignore_done:\n if np.linalg.norm(dataset['observations'][i + 1] - dataset['next_observations'][i]) > 1e-6:\n dones_float[i] = 1\n else:\n dones_float[i] = 0\n else:\n if np.linalg.norm(dataset['observations'][i + 1] - dataset['next_observations'][i]) > 1e-6 or dataset['terminals'][i] == 1.0:\n dones_float[i] = 1\n else:\n dones_float[i] = 0\n dones_float[-1] = 1\n dataset_dict = {\n 'observations': dataset['observations'].astype(np.float32),\n 'actions': dataset['actions'].astype(np.float32),\n 'rewards': dataset['rewards'].astype(np.float32),\n 'masks': 1.0 - dataset['terminals'].astype(np.float32),\n 'dones': dones_float.astype(np.float32),\n 'next_observations': dataset['next_observations'].astype(\n np.float32)\n }\n super().__init__(dataset_dict)"
},
{
"identifier": "get_robomimic_env",
"path": "vqn/robomimic_utils.py",
"snippet": "def get_robomimic_env(dataset_path, example_action, env_name):\n # Initialize ObsUtils environment variables\n ObsUtils.initialize_obs_utils_with_config(config_factory(algo_name='iql'))\n env_meta = FileUtils.get_env_metadata_from_dataset(dataset_path)\n env = EnvUtils.create_env_from_metadata(\n env_meta=env_meta,\n render=False, \n render_offscreen=False, \n use_image_obs=False,\n )\n env = RobosuiteGymWrapper(env, ENV_TO_HORIZON_MAP[env_name], example_action)\n return env"
},
{
"identifier": "ENV_TO_HORIZON_MAP",
"path": "vqn/robomimic_utils.py",
"snippet": "ENV_TO_HORIZON_MAP = {'lift': 400,\n 'can': 400,\n 'square': 400,\n 'transport': 700,\n 'tool_hang': 700}"
},
{
"identifier": "OBS_KEYS",
"path": "vqn/robomimic_utils.py",
"snippet": "OBS_KEYS = (\"robot0_eef_pos\", \"robot0_eef_quat\", \"robot0_gripper_qpos\", \"object\")"
},
{
"identifier": "Timer",
"path": "vqn/utils.py",
"snippet": "class Timer(object):\n\n def __init__(self):\n self._time = None\n\n def __enter__(self):\n self._start_time = time.time()\n return self\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n self._time = time.time() - self._start_time\n\n def __call__(self):\n return self._time"
},
{
"identifier": "define_flags_with_default",
"path": "vqn/utils.py",
"snippet": "def define_flags_with_default(**kwargs):\n for key, val in kwargs.items():\n if isinstance(val, ConfigDict):\n config_flags.DEFINE_config_dict(key, val)\n elif isinstance(val, bool):\n # Note that True and False are instances of int.\n absl.flags.DEFINE_bool(key, val, 'automatically defined flag')\n elif isinstance(val, int):\n absl.flags.DEFINE_integer(key, val, 'automatically defined flag')\n elif isinstance(val, float):\n absl.flags.DEFINE_float(key, val, 'automatically defined flag')\n elif isinstance(val, str):\n absl.flags.DEFINE_string(key, val, 'automatically defined flag')\n else:\n raise ValueError('Incorrect value type')\n return kwargs"
},
{
"identifier": "set_random_seed",
"path": "vqn/utils.py",
"snippet": "def set_random_seed(seed):\n np.random.seed(seed)\n random.seed(seed)\n init_rng(seed)"
},
{
"identifier": "get_user_flags",
"path": "vqn/utils.py",
"snippet": "def get_user_flags(flags, flags_def):\n output = {}\n for key in flags_def:\n val = getattr(flags, key)\n if isinstance(val, ConfigDict):\n output.update(flatten_config_dict(val, prefix=key))\n else:\n output[key] = val\n\n return output"
},
{
"identifier": "prefix_metrics",
"path": "vqn/utils.py",
"snippet": "def prefix_metrics(metrics, prefix):\n return {\n '{}/{}'.format(prefix, key): value for key, value in metrics.items()\n }"
},
{
"identifier": "WandBLogger",
"path": "vqn/utils.py",
"snippet": "class WandBLogger(object):\n\n @staticmethod\n def get_default_config(updates=None):\n config = ConfigDict()\n config.online = False\n config.prefix = 'JaxCQL'\n config.project = ''\n config.output_dir = '/tmp/JaxCQL'\n config.random_delay = 0.0\n config.experiment_id = config_dict.placeholder(str)\n config.anonymous = config_dict.placeholder(str)\n config.notes = config_dict.placeholder(str)\n config.entity = config_dict.placeholder(str)\n\n if updates is not None:\n config.update(ConfigDict(updates).copy_and_resolve_references())\n return config\n\n def __init__(self, config, variant):\n self.config = self.get_default_config(config)\n\n if self.config.experiment_id is None:\n self.config.experiment_id = uuid.uuid4().hex\n\n if self.config.prefix != '':\n self.config.project = '{}--{}'.format(self.config.prefix, self.config.project)\n\n if self.config.output_dir == '':\n self.config.output_dir = tempfile.mkdtemp()\n else:\n self.config.output_dir = os.path.join(self.config.output_dir, self.config.experiment_id)\n os.makedirs(self.config.output_dir, exist_ok=True)\n\n self._variant = copy(variant)\n\n if 'hostname' not in self._variant:\n self._variant['hostname'] = gethostname()\n\n if self.config.random_delay > 0:\n time.sleep(np.random.uniform(0, self.config.random_delay))\n\n self.run = wandb.init(\n reinit=True,\n config=self._variant,\n project=self.config.project,\n dir=self.config.output_dir,\n entity=config.entity,\n id=self.config.experiment_id,\n anonymous=self.config.anonymous,\n notes=self.config.notes,\n settings=wandb.Settings(\n start_method=\"thread\",\n _disable_stats=True,\n ),\n mode='online' if self.config.online else 'offline',\n )\n\n def log(self, *args, **kwargs):\n self.run.log(*args, **kwargs)\n\n def save_pickle(self, obj, filename):\n with open(os.path.join(self.config.output_dir, filename), 'wb') as fout:\n pickle.dump(obj, fout)\n\n @property\n def experiment_id(self):\n return self.config.experiment_id\n\n @property\n def variant(self):\n return self.config.variant\n\n @property\n def output_dir(self):\n return self.config.output_dir"
}
] | from absl import app, flags
from ml_collections import config_flags
from robomimic.utils.dataset import SequenceDataset
from .vqiql import VQIQLLearner, IQLSamplerPolicy, split_into_trajectories
from .replay_buffer import get_d4rl_dataset
from .sampler import TrajSampler
from .robomimic_utils import (
OfflineDataset, process_robomimic_dataset, D4RLDataset, get_robomimic_env,
ENV_TO_HORIZON_MAP, OBS_KEYS
)
from .utils import (
Timer, define_flags_with_default, set_random_seed,
get_user_flags, prefix_metrics, WandBLogger
)
import cloudpickle as pickle
import tqdm
import gym
import numpy as np
import d4rl | 6,559 |
FLAGS = flags.FLAGS
FLAGS_DEF = define_flags_with_default(
env='pen-human-v1',
dataset_dir = '',
seed=42,
save_model=False,
zero_reward=False,
reward_scale=1.0,
reward_bias=0.0,
max_traj_length=200,
eval_n_trajs=10,
eval_period=10,
batch_size=256,
vqvae_n_epochs=500,
n_epochs=1000,
n_pi_beta_epochs=2000,
n_train_step_per_epoch=50,
tqdm=True,
embedding_dim=128,
codebook_size=64,
commitment_cost=1.0,
quantization_cost=1.0,
entropy_loss_ratio=0.0,
entropy_loss_type="softmax",
entropy_temperature=1.0,
vqvae_arch='512-512',
sample_action=True,
policy_weight_decay=0.0,
kl_divergence_weight=0.0,
qf_weight_decay=0.0,
qf_arch='256-256',
iql_expectile=0.8,
iql_temperature=0.1,
iql_bc_loss_weight=0.0,
logging=WandBLogger.get_default_config(),
)
OBS_KEYS = ("robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object")
ENV_TO_HORIZON_MAP = {'lift': 400,
'can': 400,
'square': 400,
'transport': 700,
'tool_hang': 700}
def normalize(dataset):
trajs = split_into_trajectories(dataset.observations, dataset.actions,
dataset.rewards, dataset.masks,
dataset.dones_float,
dataset.next_observations)
def compute_returns(traj):
episode_return = 0
for _, _, rew, _, _, _ in traj:
episode_return += rew
return episode_return
trajs.sort(key=compute_returns)
dataset.rewards /= compute_returns(trajs[-1]) - compute_returns(trajs[0])
dataset.rewards *= 1000.0
def make_dataset(dataset, env_name, zero_reward=False):
if zero_reward:
dataset['reward'] = np.zeros_like(dataset['rewards'])
if not env_name in ENV_TO_HORIZON_MAP:
dataset = OfflineDataset(dataset)
if 'antmaze' in env_name:
dataset.dataset_dict['rewards'] *= 100
elif env_name.split('-')[0] in ['hopper', 'halfcheetah', 'walker2d']:
dataset.normalize_returns(scaling=1000)
return dataset
def main(_):
variant = get_user_flags(FLAGS, FLAGS_DEF)
wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant)
set_random_seed(FLAGS.seed)
if FLAGS.env in ENV_TO_HORIZON_MAP:
dataset_path = f'./robomimic/datasets/{FLAGS.env}/low_dim_v141.hdf5'
seq_dataset = SequenceDataset(hdf5_path=dataset_path,
obs_keys=OBS_KEYS,
dataset_keys=("actions", "rewards", "dones"),
hdf5_cache_mode="all",
load_next_obs=True)
|
FLAGS = flags.FLAGS
FLAGS_DEF = define_flags_with_default(
env='pen-human-v1',
dataset_dir = '',
seed=42,
save_model=False,
zero_reward=False,
reward_scale=1.0,
reward_bias=0.0,
max_traj_length=200,
eval_n_trajs=10,
eval_period=10,
batch_size=256,
vqvae_n_epochs=500,
n_epochs=1000,
n_pi_beta_epochs=2000,
n_train_step_per_epoch=50,
tqdm=True,
embedding_dim=128,
codebook_size=64,
commitment_cost=1.0,
quantization_cost=1.0,
entropy_loss_ratio=0.0,
entropy_loss_type="softmax",
entropy_temperature=1.0,
vqvae_arch='512-512',
sample_action=True,
policy_weight_decay=0.0,
kl_divergence_weight=0.0,
qf_weight_decay=0.0,
qf_arch='256-256',
iql_expectile=0.8,
iql_temperature=0.1,
iql_bc_loss_weight=0.0,
logging=WandBLogger.get_default_config(),
)
OBS_KEYS = ("robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object")
ENV_TO_HORIZON_MAP = {'lift': 400,
'can': 400,
'square': 400,
'transport': 700,
'tool_hang': 700}
def normalize(dataset):
trajs = split_into_trajectories(dataset.observations, dataset.actions,
dataset.rewards, dataset.masks,
dataset.dones_float,
dataset.next_observations)
def compute_returns(traj):
episode_return = 0
for _, _, rew, _, _, _ in traj:
episode_return += rew
return episode_return
trajs.sort(key=compute_returns)
dataset.rewards /= compute_returns(trajs[-1]) - compute_returns(trajs[0])
dataset.rewards *= 1000.0
def make_dataset(dataset, env_name, zero_reward=False):
if zero_reward:
dataset['reward'] = np.zeros_like(dataset['rewards'])
if not env_name in ENV_TO_HORIZON_MAP:
dataset = OfflineDataset(dataset)
if 'antmaze' in env_name:
dataset.dataset_dict['rewards'] *= 100
elif env_name.split('-')[0] in ['hopper', 'halfcheetah', 'walker2d']:
dataset.normalize_returns(scaling=1000)
return dataset
def main(_):
variant = get_user_flags(FLAGS, FLAGS_DEF)
wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant)
set_random_seed(FLAGS.seed)
if FLAGS.env in ENV_TO_HORIZON_MAP:
dataset_path = f'./robomimic/datasets/{FLAGS.env}/low_dim_v141.hdf5'
seq_dataset = SequenceDataset(hdf5_path=dataset_path,
obs_keys=OBS_KEYS,
dataset_keys=("actions", "rewards", "dones"),
hdf5_cache_mode="all",
load_next_obs=True) | dataset = process_robomimic_dataset(seq_dataset) | 6 | 2023-10-18 06:31:20+00:00 | 8k |
naver-ai/dual-teacher | tools/train.py | [
{
"identifier": "__version__",
"path": "mmseg/version.py",
"snippet": "def parse_version_info(version_str):"
},
{
"identifier": "set_random_seed",
"path": "mmseg/apis/train.py",
"snippet": "def set_random_seed(seed, deterministic=False):\n \"\"\"Set random seed.\n Args:\n seed (int): Seed to be used.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.backends.cudnn.benchmark` to False.\n Default: False.\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False"
},
{
"identifier": "train_segmentor",
"path": "mmseg/apis/train.py",
"snippet": "def train_segmentor(model,\n dataset,\n cfg,\n distributed=False,\n validate=False,\n timestamp=None,\n meta=None):\n \"\"\"Launch segmentor training.\"\"\"\n logger = get_root_logger(cfg.log_level)\n\n # prepare data loaders\n dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]\n data_loaders = [\n build_dataloader(\n ds,\n cfg.data.samples_per_gpu,\n cfg.data.workers_per_gpu,\n # cfg.gpus will be ignored if distributed\n len(cfg.gpu_ids),\n dist=distributed,\n seed=cfg.seed,\n drop_last=True) for ds in dataset\n ]\n\n # put model on gpus\n if distributed:\n find_unused_parameters = cfg.get('find_unused_parameters', False)\n # Sets the `find_unused_parameters` parameter in\n # torch.nn.parallel.DistributedDataParallel\n model = MMDistributedDataParallel(\n model.cuda(),\n device_ids=[torch.cuda.current_device()],\n broadcast_buffers=False,\n find_unused_parameters=find_unused_parameters)\n else:\n model = MMDataParallel(\n model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)\n\n # build runner\n optimizer = build_optimizer(model, cfg.optimizer)\n\n if cfg.get('runner') is None:\n cfg.runner = {'type': 'IterBasedRunner', 'max_iters': cfg.total_iters}\n warnings.warn(\n 'config is now expected to have a `runner` section, '\n 'please set `runner` in your config.', UserWarning)\n\n runner = build_runner(\n cfg.runner,\n default_args=dict(\n model=model,\n batch_processor=None,\n optimizer=optimizer,\n work_dir=cfg.work_dir,\n logger=logger,\n meta=meta))\n\n # register hooks\n runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,\n cfg.checkpoint_config, cfg.log_config,\n cfg.get('momentum_config', None))\n\n # an ugly walkaround to make the .log and .log.json filenames the same\n runner.timestamp = timestamp\n\n # register eval hooks\n if validate:\n val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))\n val_dataloader = build_dataloader(\n val_dataset,\n samples_per_gpu=1,\n workers_per_gpu=cfg.data.workers_per_gpu,\n dist=distributed,\n shuffle=False)\n eval_cfg = cfg.get('evaluation', {})\n eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'\n eval_hook = DistEvalHook if distributed else EvalHook\n runner.register_hook(eval_hook(val_dataloader, **eval_cfg))\n\n if cfg.resume_from:\n runner.resume(cfg.resume_from)\n elif cfg.load_from:\n runner.load_checkpoint(cfg.load_from)\n runner.run(data_loaders, cfg.workflow)"
},
{
"identifier": "build_dataloader",
"path": "mmseg/datasets/builder.py",
"snippet": "def build_dataloader(dataset,\n samples_per_gpu,\n workers_per_gpu,\n num_gpus=1,\n dist=True,\n shuffle=True,\n seed=None,\n drop_last=False,\n pin_memory=True,\n dataloader_type='PoolDataLoader',\n **kwargs):\n \"\"\"Build PyTorch DataLoader.\n\n In distributed training, each GPU/process has a dataloader.\n In non-distributed training, there is only one dataloader for all GPUs.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n samples_per_gpu (int): Number of training samples on each GPU, i.e.,\n batch size of each GPU.\n workers_per_gpu (int): How many subprocesses to use for data loading\n for each GPU.\n num_gpus (int): Number of GPUs. Only used in non-distributed training.\n dist (bool): Distributed training/test or not. Default: True.\n shuffle (bool): Whether to shuffle the data at every epoch.\n Default: True.\n seed (int | None): Seed to be used. Default: None.\n drop_last (bool): Whether to drop the last incomplete batch in epoch.\n Default: False\n pin_memory (bool): Whether to use pin_memory in DataLoader.\n Default: True\n dataloader_type (str): Type of dataloader. Default: 'PoolDataLoader'\n kwargs: any keyword argument to be used to initialize DataLoader\n\n Returns:\n DataLoader: A PyTorch dataloader.\n \"\"\"\n rank, world_size = get_dist_info()\n if dist:\n sampler = DistributedSampler(\n dataset, world_size, rank, shuffle=shuffle)\n shuffle = False\n batch_size = samples_per_gpu\n num_workers = workers_per_gpu\n else:\n sampler = None\n batch_size = num_gpus * samples_per_gpu\n num_workers = num_gpus * workers_per_gpu\n\n init_fn = partial(\n worker_init_fn, num_workers=num_workers, rank=rank,\n seed=seed) if seed is not None else None\n\n assert dataloader_type in (\n 'DataLoader',\n 'PoolDataLoader'), f'unsupported dataloader {dataloader_type}'\n\n if dataloader_type == 'PoolDataLoader':\n dataloader = PoolDataLoader\n elif dataloader_type == 'DataLoader':\n dataloader = DataLoader\n\n data_loader = dataloader(\n dataset,\n batch_size=batch_size,\n sampler=sampler,\n num_workers=num_workers,\n collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),\n pin_memory=pin_memory,\n shuffle=shuffle,\n worker_init_fn=init_fn,\n drop_last=drop_last,\n **kwargs)\n\n return data_loader"
},
{
"identifier": "build_dataset",
"path": "mmseg/datasets/builder.py",
"snippet": "def build_dataset(cfg, default_args=None):\n \"\"\"Build datasets.\"\"\"\n from .dataset_wrappers import ConcatDataset, RepeatDataset\n if isinstance(cfg, (list, tuple)):\n dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])\n elif cfg['type'] == 'RepeatDataset':\n dataset = RepeatDataset(\n build_dataset(cfg['dataset'], default_args), cfg['times'])\n elif isinstance(cfg.get('img_dir'), (list, tuple)) or isinstance(\n cfg.get('split', None), (list, tuple)):\n dataset = _concat_dataset(cfg, default_args)\n else:\n dataset = build_from_cfg(cfg, DATASETS, default_args)\n\n return dataset"
},
{
"identifier": "build_segmentor",
"path": "mmseg/models/builder.py",
"snippet": "def build_segmentor(cfg, train_cfg=None, test_cfg=None):\n \"\"\"Build segmentor.\"\"\"\n if train_cfg is not None or test_cfg is not None:\n warnings.warn(\n 'train_cfg and test_cfg is deprecated, '\n 'please specify them in model', UserWarning)\n assert cfg.get('train_cfg') is None or train_cfg is None, \\\n 'train_cfg specified in both outer field and model field '\n assert cfg.get('test_cfg') is None or test_cfg is None, \\\n 'test_cfg specified in both outer field and model field '\n return build(cfg, SEGMENTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))"
},
{
"identifier": "collect_env",
"path": "mmseg/utils/collect_env.py",
"snippet": "def collect_env():\n \"\"\"Collect the information of the running environments.\"\"\"\n env_info = collect_base_env()\n env_info['MMSegmentation'] = f'{mmseg.__version__}+{get_git_hash()[:7]}'\n\n return env_info"
},
{
"identifier": "get_root_logger",
"path": "mmseg/utils/logger.py",
"snippet": "def get_root_logger(log_file=None, log_level=logging.INFO):\n \"\"\"Get the root logger.\n\n The logger will be initialized if it has not been initialized. By default a\n StreamHandler will be added. If `log_file` is specified, a FileHandler will\n also be added. The name of the root logger is the top-level package name,\n e.g., \"mmseg\".\n\n Args:\n log_file (str | None): The log filename. If specified, a FileHandler\n will be added to the root logger.\n log_level (int): The root logger level. Note that only the process of\n rank 0 is affected, while other processes will set the level to\n \"Error\" and be silent most of the time.\n\n Returns:\n logging.Logger: The root logger.\n \"\"\"\n\n logger = get_logger(name='mmseg', log_file=log_file, log_level=log_level)\n\n return logger"
},
{
"identifier": "MiT_SegFormer",
"path": "seg_core/model.py",
"snippet": "class MiT_SegFormer(nn.Module):\n def __init__(self, backbone, num_classes=20, embedding_dim=256, pretrained=None):\n super().__init__()\n self.num_classes = num_classes\n self.embedding_dim = embedding_dim\n self.feature_strides = [4, 8, 16, 32]\n # self.in_channels = [32, 64, 160, 256]\n # self.in_channels = [64, 128, 320, 512]\n\n self.encoder = getattr(mix_transformer, backbone)()\n self.in_channels = self.encoder.embed_dims\n mit_num = backbone.split('_')[1][1]\n ## initilize encoder\n if pretrained:\n state_dict = torch.load('/home/najm/DualTeacher/pretrained/mit_b' + mit_num + '.pth')\n state_dict.pop('head.weight')\n state_dict.pop('head.bias')\n self.encoder.load_state_dict(state_dict, )\n\n self.decoder = SegFormerHead(feature_strides=self.feature_strides, in_channels=self.in_channels, embedding_dim=self.embedding_dim, num_classes=self.num_classes)\n\n self.classifier = nn.Conv2d(in_channels=self.in_channels[-1], out_channels=self.num_classes, kernel_size=1, bias=False)\n\n def _forward_cam(self, x):\n\n cam = F.conv2d(x, self.classifier.weight)\n cam = F.relu(cam)\n\n return cam\n\n def get_param_groups(self):\n\n param_groups = [[], [], []] #\n\n for name, param in list(self.encoder.named_parameters()):\n if \"norm\" in name:\n param_groups[1].append(param)\n else:\n param_groups[0].append(param)\n\n for param in list(self.decoder.parameters()):\n param_groups[2].append(param)\n\n param_groups[2].append(self.classifier.weight)\n\n return param_groups\n\n def forward(self, x):\n\n _x = self.encoder(x)\n _x1, _x2, _x3, _x4 = _x\n cls = self.classifier(_x4)\n return self.decoder(_x)"
},
{
"identifier": "PolyWarmupAdamW",
"path": "seg_core/optimizer.py",
"snippet": "class PolyWarmupAdamW(torch.optim.AdamW):\n\n def __init__(self, params, lr, weight_decay, betas, warmup_iter=None, max_iter=None, warmup_ratio=None, power=None):\n super().__init__(params, lr=lr, betas=betas, weight_decay=weight_decay, eps=1e-8)\n\n self.global_step = 0\n self.warmup_iter = warmup_iter\n self.warmup_ratio = warmup_ratio\n self.max_iter = max_iter\n self.power = power\n\n self.__init_lr = [group['lr'] for group in self.param_groups]\n\n def step(self, closure=None):\n ## adjust lr\n if self.global_step < self.warmup_iter:\n\n lr_mult = 1 - (1 - self.global_step / self.warmup_iter) * (1 - self.warmup_ratio)\n for i in range(len(self.param_groups)):\n self.param_groups[i]['lr'] = self.__init_lr[i] * lr_mult\n\n elif self.global_step < self.max_iter:\n\n lr_mult = (1 - self.global_step / self.max_iter) ** self.power\n for i in range(len(self.param_groups)):\n self.param_groups[i]['lr'] = self.__init_lr[i] * lr_mult\n\n # step\n super().step(closure)\n\n self.global_step += 1"
},
{
"identifier": "ClassMixLoss",
"path": "seg_core/augmentations.py",
"snippet": "class ClassMixLoss(nn.Module):\n def __init__(self, weight=None, reduction=None, ignore_index=None):\n super(ClassMixLoss, self).__init__()\n self.CE = nn.CrossEntropyLoss(weight=weight, reduction=reduction, ignore_index=ignore_index)\n\n def forward(self, output, target, pixel_weight):\n loss = self.CE(output, target)\n loss = torch.mean(loss * pixel_weight)\n return loss"
},
{
"identifier": "compute_classmix",
"path": "seg_core/augmentations.py",
"snippet": "def compute_classmix(b, h, w, criterion, cm_loss_fn, model, ema_model, imgs, labels, unsup_imgs, image_u_strong, threshold):\n # Unlabeled Process\n with torch.no_grad():\n logits_occluder = ema_model(unsup_imgs) # 129\n logits_occluder = F.interpolate(logits_occluder, (h, w), mode=\"bilinear\", align_corners=False) # 513\n softmax_occluder = torch.softmax(logits_occluder, dim=1)\n max_prob_occluder, argmax_occluder = torch.max(softmax_occluder, dim=1)\n\n binary_mask = get_bin_mask(b, argmax_occluder)\n binary_mask = binary_mask.squeeze(dim=1)\n if b == 2:\n shuffle_index = torch.tensor([1, 0])\n else:\n shuffle_index = torch.randperm(b).cuda()\n class_mixed_img = class_mix(occluder_mask=binary_mask, occluder=image_u_strong, occludee=image_u_strong[shuffle_index])\n\n num_labeled = len(imgs)\n outputs = model(torch.cat([imgs, class_mixed_img]))\n outputs, outputs_u = outputs[:num_labeled], outputs[num_labeled:]\n\n pred_large = F.interpolate(outputs, size=labels.shape[1:], mode='bilinear', align_corners=False)\n sup_loss = criterion(pred_large, labels.type(torch.long).clone())\n del outputs, pred_large\n torch.cuda.empty_cache()\n logits_class_mixed = F.interpolate(outputs_u, (h, w), mode=\"bilinear\", align_corners=False)\n\n class_mixed_softmax = class_mix(occluder_mask=binary_mask, occluder=softmax_occluder, occludee=softmax_occluder[shuffle_index])\n max_prob_occluder, pseudo_label = torch.max(class_mixed_softmax, dim=1)\n\n unlabeled_weight = torch.sum(max_prob_occluder.ge(threshold).long() == 1).item() / np.size(np.array(pseudo_label.cpu()))\n pixel_weight = unlabeled_weight * torch.ones(max_prob_occluder.shape).cuda()\n\n class_mix_loss = cm_loss_fn(logits_class_mixed, pseudo_label, pixel_weight)\n loss = sup_loss + class_mix_loss\n return loss"
},
{
"identifier": "compute_cutmix",
"path": "seg_core/augmentations.py",
"snippet": "def compute_cutmix(h, w, imgs, labels, criterion, model, ema_model, image_u, threshold):\n with torch.no_grad():\n pred = ema_model(image_u)\n pred = F.interpolate(pred, (h, w), mode=\"bilinear\", align_corners=False)\n pred = F.softmax(pred, dim=1)\n pred_logit, pred_label = torch.max(pred, dim=1)\n\n image_aug, label_aug = cut_mixer(image_u, pred_label.clone())\n\n image_aug, label_aug, pred_logit = \\\n batch_transform(image_aug, label_aug, pred_logit,\n crop_size=(pred_logit.shape[1], pred_logit.shape[2]), scale_size=(1.0, 1.0), apply_augmentation=True)\n\n num_labeled = len(imgs)\n outputs = model(torch.cat([imgs, image_aug]))\n outputs, outputs_u = outputs[:num_labeled], outputs[num_labeled:]\n pred_large = F.interpolate(outputs, size=labels.shape[1:], mode='bilinear', align_corners=False)\n sup_loss = criterion(pred_large, labels.type(torch.long).clone())\n\n pred_u = F.interpolate(outputs_u, (h, w), mode=\"bilinear\", align_corners=False)\n\n cutmix_loss = compute_unsupervised_loss(pred_u, label_aug.clone(), pred_logit, threshold)\n return sup_loss + cutmix_loss"
},
{
"identifier": "compute_ic",
"path": "seg_core/augmentations.py",
"snippet": "def compute_ic(model, ema_model, image_u, image_u_strong, criterion_u, label_u, h, w, threshold):\n with torch.no_grad():\n logits = ema_model(image_u) # 129\n logits = F.interpolate(logits, (h, w), mode=\"bilinear\", align_corners=False) # 513\n softmax_out = torch.softmax(logits, dim=1)\n max_probs, argmax_label = torch.max(softmax_out, dim=1)\n pred_dc = model(image_u_strong)\n pred_dc = F.interpolate(pred_dc, (h, w), mode=\"bilinear\", align_corners=False) # 513\n loss_dc = criterion_u(pred_dc, argmax_label)\n loss_dc = loss_dc * ((max_probs >= threshold) & (label_u != 255))\n loss_dc = loss_dc.sum() / (label_u != 255).sum().item()\n return loss_dc.clone()"
},
{
"identifier": "single_gpu_test",
"path": "mmseg/apis/test.py",
"snippet": "def single_gpu_test(model,\n data_loader,\n show=False,\n out_dir=None,\n efficient_test=False):\n \"\"\"Test with single GPU.\n\n Args:\n model (nn.Module): Model to be tested.\n data_loader (utils.data.Dataloader): Pytorch data loader.\n show (bool): Whether show results during infernece. Default: False.\n out_dir (str, optional): If specified, the results will be dumped into\n the directory to save output results.\n efficient_test (bool): Whether save the results as local numpy files to\n save CPU memory during evaluation. Default: False.\n\n Returns:\n list: The prediction results.\n \"\"\"\n\n model.eval()\n results = []\n dataset = data_loader.dataset\n prog_bar = mmcv.ProgressBar(len(dataset))\n for i, data in enumerate(data_loader):\n with torch.no_grad():\n result = model(return_loss=False, **data)\n\n if show or out_dir:\n img_tensor = data['img'][0]\n img_metas = data['img_metas'][0].data[0]\n imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])\n assert len(imgs) == len(img_metas)\n\n for img, img_meta in zip(imgs, img_metas):\n h, w, _ = img_meta['img_shape']\n img_show = img[:h, :w, :]\n\n ori_h, ori_w = img_meta['ori_shape'][:-1]\n img_show = mmcv.imresize(img_show, (ori_w, ori_h))\n\n if out_dir:\n out_file = osp.join(out_dir, img_meta['ori_filename'])\n else:\n out_file = None\n\n model.module.show_result(\n img_show,\n result,\n palette=dataset.PALETTE,\n show=show,\n out_file=out_file)\n\n if isinstance(result, list):\n if efficient_test:\n result = [np2tmp(_) for _ in result]\n results.extend(result)\n else:\n if efficient_test:\n result = np2tmp(result)\n results.append(result)\n\n batch_size = data['img'][0].size(0)\n for _ in range(batch_size):\n prog_bar.update()\n return results"
}
] | import argparse
import copy
import os
import os.path as osp
import time
import logging
import mmcv
import torch
import numpy as np
import seg_core.eval_seg as eval_seg
import torch.nn.functional as F
import warnings
import torch.distributed as dist
import random
import tempfile
from mmcv.runner import init_dist
from mmcv.utils import Config, DictAction, get_git_hash
from torchvision.transforms import ToTensor
from mmseg import __version__
from mmseg.apis import set_random_seed, train_segmentor
from mmseg.datasets import build_dataset, build_dataloader
from mmseg.models import build_segmentor
from mmseg.utils import collect_env, get_root_logger
from seg_core.model import MiT_SegFormer
from seg_core.optimizer import PolyWarmupAdamW
from seg_core.augmentations import ClassMixLoss, compute_classmix, compute_cutmix, compute_ic
from torchvision.utils import save_image
from dist_helper import setup_distributed
from mmseg.apis import single_gpu_test
from mmcv.image import tensor2imgs
from PIL import Image, ImageOps, ImageFilter
from torchvision import transforms
from copy import deepcopy | 6,075 | """
Dual-Teacher
Copyright (c) 2023-present NAVER Cloud Corp.
distributed under NVIDIA Source Code License for SegFormer
--------------------------------------------------------
References:
SegFormer: https://github.com/NVlabs/SegFormer
--------------------------------------------------------
"""
warnings.filterwarnings("ignore")
criterion_u = torch.nn.CrossEntropyLoss(reduction='none').cuda()
def train_sup(args, model, optimizer, train_loader, val_loader, criterion, max_iters, print_iters, eval_iters):
train_iterator = iter(train_loader)
if args.ddp:
rank, world_size = dist.get_rank(), dist.get_world_size()
else:
rank = 0
for epoch in range(200):
for i in range(len(train_loader)):
model.train()
try:
batch_data = next(train_iterator)
except:
train_iterator = iter(train_loader)
batch_data = next(train_iterator)
image = batch_data['img'].data[0].cuda(non_blocking=True)
label = batch_data['gt_semantic_seg'].data[0].squeeze(dim=1).cuda(non_blocking=True)
outputs = model(image)
outputs = F.interpolate(outputs, size=label.shape[1:], mode='bilinear', align_corners=False)
seg_loss = criterion(outputs, label.type(torch.long))
optimizer.zero_grad()
seg_loss.backward()
optimizer.step()
if rank == 0:
lr = optimizer.param_groups[0]['lr']
logging.info("save_path:{}".format(args.save_path))
logging.info("Iter: %d; LR: %.3e; seg_loss: %f" % (i + 1, lr, seg_loss.item()))
print("Iter: %d; LR: %.3e; seg_loss: %f" % (i + 1, lr, seg_loss.item()))
logging.info('[iter:{}] Validation:'.format(i + 1))
print('[iter:{}] Validation:'.format(i + 1))
val_score = val(model.module, val_loader)
logging.info('mIoU:{:.5f}'.format(val_score['Mean IoU'] * 100))
print('mIoU:{:.5f}'.format(val_score['Mean IoU'] * 100))
model.train()
def train_dual(args, model, model_teacher, model_teacher2, optimizer, train_loader, train_loader_u, val_loader, criterion, cm_loss_fn, max_iters, print_iters, eval_iters):
if args.ddp:
rank, world_size = dist.get_rank(), dist.get_world_size()
else:
rank = 0
best_miou, best_epoch = 0, 0
for epoch in range(200):
model.train()
train_loader.sampler.set_epoch(epoch)
train_loader_u.sampler.set_epoch(epoch)
train_iterator = iter(train_loader)
train_iterator_u = iter(train_loader_u)
if epoch % 2 == 0:
ema_model = model_teacher
do_cut_mix = True
do_class_mix = False
else:
ema_model = model_teacher2
do_cut_mix = False
do_class_mix = True
ema_model.train()
for i in range(len(train_loader)):
try:
batch_data_u = next(train_iterator_u)
except:
train_iterator_u = iter(train_loader_u)
batch_data_u = next(train_iterator_u)
try:
batch_data = next(train_iterator)
except:
train_iterator = iter(train_loader)
batch_data = next(train_iterator)
image = batch_data['img'].data[0].cuda(non_blocking=True)
label = batch_data['gt_semantic_seg'].data[0].squeeze(dim=1).cuda(non_blocking=True)
image_u = batch_data_u['img'].data[0].cuda(non_blocking=True)
label_u = batch_data['gt_semantic_seg'].data[0].squeeze(dim=1).cuda(non_blocking=True)
b, _, h, w = image.shape
image_u_strong = deepcopy(image_u)
image_u_strong = transforms.ColorJitter(0.5, 0.5, 0.5, 0.25)(image_u_strong)
image_u_strong = transforms.RandomGrayscale(p=0.2)(image_u_strong)
if do_class_mix:
loss = compute_classmix(b, h, w, criterion, cm_loss_fn, model, ema_model, image, label, image_u, image_u_strong, threshold=0.95)
if do_cut_mix:
| """
Dual-Teacher
Copyright (c) 2023-present NAVER Cloud Corp.
distributed under NVIDIA Source Code License for SegFormer
--------------------------------------------------------
References:
SegFormer: https://github.com/NVlabs/SegFormer
--------------------------------------------------------
"""
warnings.filterwarnings("ignore")
criterion_u = torch.nn.CrossEntropyLoss(reduction='none').cuda()
def train_sup(args, model, optimizer, train_loader, val_loader, criterion, max_iters, print_iters, eval_iters):
train_iterator = iter(train_loader)
if args.ddp:
rank, world_size = dist.get_rank(), dist.get_world_size()
else:
rank = 0
for epoch in range(200):
for i in range(len(train_loader)):
model.train()
try:
batch_data = next(train_iterator)
except:
train_iterator = iter(train_loader)
batch_data = next(train_iterator)
image = batch_data['img'].data[0].cuda(non_blocking=True)
label = batch_data['gt_semantic_seg'].data[0].squeeze(dim=1).cuda(non_blocking=True)
outputs = model(image)
outputs = F.interpolate(outputs, size=label.shape[1:], mode='bilinear', align_corners=False)
seg_loss = criterion(outputs, label.type(torch.long))
optimizer.zero_grad()
seg_loss.backward()
optimizer.step()
if rank == 0:
lr = optimizer.param_groups[0]['lr']
logging.info("save_path:{}".format(args.save_path))
logging.info("Iter: %d; LR: %.3e; seg_loss: %f" % (i + 1, lr, seg_loss.item()))
print("Iter: %d; LR: %.3e; seg_loss: %f" % (i + 1, lr, seg_loss.item()))
logging.info('[iter:{}] Validation:'.format(i + 1))
print('[iter:{}] Validation:'.format(i + 1))
val_score = val(model.module, val_loader)
logging.info('mIoU:{:.5f}'.format(val_score['Mean IoU'] * 100))
print('mIoU:{:.5f}'.format(val_score['Mean IoU'] * 100))
model.train()
def train_dual(args, model, model_teacher, model_teacher2, optimizer, train_loader, train_loader_u, val_loader, criterion, cm_loss_fn, max_iters, print_iters, eval_iters):
if args.ddp:
rank, world_size = dist.get_rank(), dist.get_world_size()
else:
rank = 0
best_miou, best_epoch = 0, 0
for epoch in range(200):
model.train()
train_loader.sampler.set_epoch(epoch)
train_loader_u.sampler.set_epoch(epoch)
train_iterator = iter(train_loader)
train_iterator_u = iter(train_loader_u)
if epoch % 2 == 0:
ema_model = model_teacher
do_cut_mix = True
do_class_mix = False
else:
ema_model = model_teacher2
do_cut_mix = False
do_class_mix = True
ema_model.train()
for i in range(len(train_loader)):
try:
batch_data_u = next(train_iterator_u)
except:
train_iterator_u = iter(train_loader_u)
batch_data_u = next(train_iterator_u)
try:
batch_data = next(train_iterator)
except:
train_iterator = iter(train_loader)
batch_data = next(train_iterator)
image = batch_data['img'].data[0].cuda(non_blocking=True)
label = batch_data['gt_semantic_seg'].data[0].squeeze(dim=1).cuda(non_blocking=True)
image_u = batch_data_u['img'].data[0].cuda(non_blocking=True)
label_u = batch_data['gt_semantic_seg'].data[0].squeeze(dim=1).cuda(non_blocking=True)
b, _, h, w = image.shape
image_u_strong = deepcopy(image_u)
image_u_strong = transforms.ColorJitter(0.5, 0.5, 0.5, 0.25)(image_u_strong)
image_u_strong = transforms.RandomGrayscale(p=0.2)(image_u_strong)
if do_class_mix:
loss = compute_classmix(b, h, w, criterion, cm_loss_fn, model, ema_model, image, label, image_u, image_u_strong, threshold=0.95)
if do_cut_mix: | loss = compute_cutmix(h, w, image, label, criterion, model, ema_model, image_u, threshold=0.95) | 12 | 2023-10-19 04:04:31+00:00 | 8k |
Azure/azure-openai-benchmark | benchmark/loadcmd.py | [
{
"identifier": "AsyncHTTPExecuter",
"path": "benchmark/asynchttpexecuter.py",
"snippet": "class AsyncHTTPExecuter:\n \"\"\"\n An implementation of an async HTTP executer class with rate limiting and\n concurrency control.\n \"\"\"\n def __init__(self, async_http_func: Callable[[aiohttp.ClientSession], None], rate_limiter=NoRateLimiter(), max_concurrency=12):\n \"\"\"\n Creates a new executer.\n :param async_http_func: A callable function that takes aiohttp.ClientSession to use to perform request.\n :param rate_limiter: Rate limiter object to use, defaults to NoRateLimiter.\n :param max_concurrency: Maximum number of concurrent requests, defaults to 12.\n \"\"\"\n self.async_http_func = async_http_func\n self.rate_limiter = rate_limiter\n self.max_concurrency = max_concurrency\n self.max_lag_warn = timedelta(seconds=5).seconds\n self.terminate = False\n\n def run(self, call_count=None, duration=None):\n \"\"\"\n Runs the executer. If call_count and duration not specified, it will run until cancelled.\n :param call_count: Number of calls to execute, default infinite.\n :param duration: Duration in second for the run, default infinite.\n \"\"\"\n asyncio.run(self._run(call_count=call_count, duration=duration))\n\n async def _run(self, call_count=None, duration=None):\n orig_sigint_handler = signal.signal(signal.SIGINT, self._terminate)\n orig_sigterm_handler = signal.signal(signal.SIGTERM, self._terminate)\n # disable all TCP limits for highly parallel loads\n conn = aiohttp.TCPConnector(limit=0)\n async with aiohttp.ClientSession(connector=conn) as session:\n start_time = time.time()\n calls_made = 0\n request_tasks = set()\n while (call_count is None or calls_made < call_count) and (duration is None or (time.time() - start_time) < duration) and not self.terminate:\n async with self.rate_limiter:\n if len(request_tasks) > self.max_concurrency:\n wait_start_time = time.time()\n _, crs_pending = await asyncio.wait(request_tasks, return_when=asyncio.FIRST_COMPLETED)\n request_tasks = crs_pending\n waited = time.time() - wait_start_time\n if waited > LAG_WARN_DURATION and type(self.rate_limiter) is not NoRateLimiter:\n logging.warning(f\"falling behind committed rate by {round(waited, 3)}s, consider increasing number of clients.\")\n v = asyncio.create_task(self.async_http_func(session))\n request_tasks.add(v)\n calls_made += 1\n\n if len(request_tasks) > 0:\n logging.info(f\"waiting for {len(request_tasks)} requests to drain\")\n await asyncio.wait(request_tasks)\n\n signal.signal(signal.SIGINT, orig_sigint_handler)\n signal.signal(signal.SIGTERM, orig_sigterm_handler)\n\n def _terminate(self, *args):\n if not self.terminate:\n logging.warning(\"got terminate signal, draining. signal again to exit immediately.\")\n self.terminate = True\n else:\n logging.info(\"forcing program exit\")\n os._exit(0)"
},
{
"identifier": "OAIRequester",
"path": "benchmark/oairequester.py",
"snippet": "class OAIRequester:\n \"\"\"\n A simple AOAI requester that makes a streaming call and collect corresponding\n statistics.\n :param api_key: Azure OpenAI resource endpoint key.\n :param url: Full deployment URL in the form of https://<resource>.openai.azure.com/openai/deployments/<deployment>/chat/completins?api-version=<api_version>\n :param timeout: Timeout for each request.\n \"\"\"\n def __init__(self, api_key: str, url: str, timeout=None, backoff=False):\n self.api_key = api_key\n self.url = url\n self.timeout = timeout\n self.backoff = backoff\n\n async def call(self, session:aiohttp.ClientSession, body: dict) -> RequestStats:\n \"\"\"\n Makes a single call with body and returns statistics. The function\n forces the request in streaming mode to be able to collect token\n generation latency.\n In case of failure, if the status code is 429 due to throttling, value\n of header retry-after-ms will be honored. Otherwise, request\n will be retried with an exponential backoff.\n Any other non-200 status code will fail immediately.\n\n :param body: json request body.\n :return RequestStats.\n \"\"\"\n stats = RequestStats()\n # operate only in streaming mode so we can collect token stats.\n body[\"stream\"] = True\n try:\n await self._call(session, body, stats)\n except Exception as e:\n stats.last_exception = e\n\n return stats\n\n @backoff.on_exception(backoff.expo,\n aiohttp.ClientError,\n jitter=backoff.full_jitter,\n max_time=MAX_RETRY_SECONDS,\n giveup=_terminal_http_code)\n async def _call(self, session:aiohttp.ClientSession, body: dict, stats: RequestStats):\n headers = {\n \"api-key\": self.api_key,\n \"Content-Type\": \"application/json\",\n TELEMETRY_USER_AGENT_HEADER: USER_AGENT,\n }\n stats.request_start_time = time.time()\n while time.time() - stats.request_start_time < MAX_RETRY_SECONDS:\n stats.calls += 1\n response = await session.post(self.url, headers=headers, json=body)\n stats.response_status_code = response.status\n # capture utilization in all cases, if found\n self._read_utilization(response, stats)\n if response.status != 429:\n break\n if RETRY_AFTER_MS_HEADER in response.headers:\n try:\n retry_after_str = response.headers[RETRY_AFTER_MS_HEADER]\n retry_after_ms = float(retry_after_str)\n logging.debug(f\"retry-after sleeping for {retry_after_ms}ms\")\n await asyncio.sleep(retry_after_ms/1000.0)\n except ValueError as e:\n logging.warning(f\"unable to parse retry-after header value: {UTILIZATION_HEADER}={retry_after_str}: {e}\") \n # fallback to backoff\n break\n else:\n # fallback to backoff\n break\n\n if response.status != 200 and response.status != 429:\n logging.warning(f\"call failed: {REQUEST_ID_HEADER}={response.headers[REQUEST_ID_HEADER]} {response.status}: {response.reason}\")\n if self.backoff:\n response.raise_for_status()\n if response.status == 200:\n await self._handle_response(response, stats)\n \n async def _handle_response(self, response: aiohttp.ClientResponse, stats: RequestStats):\n async with response:\n stats.response_time = time.time()\n async for line in response.content:\n if not line.startswith(b'data:'):\n continue\n if stats.first_token_time is None:\n stats.first_token_time = time.time()\n if stats.generated_tokens is None:\n stats.generated_tokens = 0\n stats.generated_tokens += 1\n stats.response_end_time = time.time()\n\n def _read_utilization(self, response: aiohttp.ClientResponse, stats: RequestStats):\n if UTILIZATION_HEADER in response.headers:\n util_str = response.headers[UTILIZATION_HEADER]\n if len(util_str) == 0:\n logging.warning(f\"got empty utilization header {UTILIZATION_HEADER}\")\n elif util_str[-1] != '%':\n logging.warning(f\"invalid utilization header value: {UTILIZATION_HEADER}={util_str}\")\n else:\n try:\n stats.deployment_utilization = float(util_str[:-1])\n except ValueError as e:\n logging.warning(f\"unable to parse utilization header value: {UTILIZATION_HEADER}={util_str}: {e}\") "
},
{
"identifier": "num_tokens_from_messages",
"path": "benchmark/oaitokenizer.py",
"snippet": "def num_tokens_from_messages(messages, model):\n \"\"\"Return the number of tokens used by a list of messages.\"\"\"\n\n encoding = tiktoken.encoding_for_model(model)\n\n if model in {\n \"gpt-3.5-turbo-0613\",\n \"gpt-3.5-turbo-16k-0613\",\n \"gpt-4-0314\",\n \"gpt-4-32k-0314\",\n \"gpt-4-0613\",\n \"gpt-4-32k-0613\",\n }:\n tokens_per_message = 3\n tokens_per_name = 1\n elif model == \"gpt-3.5-turbo-0301\":\n tokens_per_message = 4 # every message follows <|start|>{role/name}\\n{content}<|end|>\\n\n tokens_per_name = -1 # if there's a name, the role is omitted\n elif \"gpt-3.5-turbo\" in model:\n logging.warn(\"Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.\")\n return num_tokens_from_messages(messages, model=\"gpt-3.5-turbo-0613\")\n elif \"gpt-4\" in model:\n logging.warn(\"Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.\")\n return num_tokens_from_messages(messages, model=\"gpt-4-0613\")\n else:\n raise NotImplementedError(\n f\"\"\"num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.\"\"\"\n )\n num_tokens = 0\n for message in messages:\n num_tokens += tokens_per_message\n for key, value in message.items():\n num_tokens += len(encoding.encode(value))\n if key == \"name\":\n num_tokens += tokens_per_name\n num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>\n return num_tokens"
},
{
"identifier": "NoRateLimiter",
"path": "benchmark/ratelimiting.py",
"snippet": "class NoRateLimiter:\n \"\"\"\n Dummy rate limiter that does not impose any limits.\n \"\"\"\n async def __aenter__(self):\n pass\n async def __aexit__(self, *args):\n pass"
},
{
"identifier": "RateLimiter",
"path": "benchmark/ratelimiting.py",
"snippet": "class RateLimiter:\n \"\"\"\n Simple rate limiter.\n \"\"\"\n def __init__(self, calls: int, period: float):\n \"\"\"\n Create a new RateLimiter with restricted calls per period. The implementation\n uses simple linear rate estimator.\n \"\"\"\n self.calls = collections.deque()\n self.period = period\n self.max_calls = calls\n\n async def __aenter__(self):\n sleep_time = 0\n if len(self.calls) >= self.max_calls:\n sleep_time = self.period - self._timespan()\n elif len(self.calls) > 1:\n sleep_time = (self.period - self._timespan()) / (math.ceil(self.max_calls * RATE_ESTIMATOR_BURST_FACTOR) - len(self.calls))\n\n if sleep_time > 0:\n await asyncio.sleep(sleep_time)\n return self\n\n async def __aexit__(self, *args):\n self.calls.append(time.time())\n while self._timespan() >= self.period:\n self.calls.popleft()\n\n def _timespan(self):\n return self.calls[-1] - self.calls[0]"
},
{
"identifier": "_StatsAggregator",
"path": "benchmark/statsaggregator.py",
"snippet": "class _StatsAggregator(threading.Thread):\n \"\"\"\n A thread-safe request stats aggregator that can periodically emit statistics.\n \"\"\"\n lock = threading.Lock()\n terminate: threading.Event\n\n start_time: float = 0\n processing_requests_count: int = 0\n total_requests_count: int = 0\n total_failed_count: int = 0\n throttled_count: int = 0\n\n request_timestamps = _Samples()\n request_latency = _Samples()\n call_tries = _Samples()\n response_latencies = _Samples()\n first_token_latencies = _Samples()\n token_latencies = _Samples()\n context_tokens = _Samples()\n generated_tokens = _Samples()\n utilizations = _Samples()\n\n def __init__(self, clients:int, dump_duration:float=5, window_duration:float=60, json_output=False, *args,**kwargs):\n \"\"\"\n :param clients: number of clients used in testing\n :param dump_duration: duration in seconds to dump current aggregates.\n :param window_duration: duration of sliding window in second to consider for aggregation.\n :param json_output: whether to dump periodic stats as json or human readable.\n \"\"\"\n self.clients = clients\n self.dump_duration = dump_duration\n self.json_output = json_output\n self.window_duration = window_duration\n\n super(_StatsAggregator, self).__init__(*args, **kwargs)\n\n def run(self):\n \"\"\"\n Start the periodic aggregator. Use stop() to stop.\n \"\"\"\n self.start_time = time.time()\n self.terminate = threading.Event()\n while not self.terminate.wait(self.dump_duration):\n self._dump()\n self._slide_window()\n\n def stop(self):\n self.terminate.set()\n # Dump one more time to ensure we include the final request\n self._dump()\n\n def record_new_request(self):\n \"\"\"\n Records a new request, so that the number of processing requests is known.\n \"\"\"\n with self.lock:\n self.processing_requests_count += 1\n\n def aggregate_request(self, stats: RequestStats):\n \"\"\"\n Aggregates request stat within the sliding window.\n :param stats: request stats object.\n \"\"\"\n with self.lock:\n self.processing_requests_count -= 1\n self.total_requests_count += 1\n self.call_tries._append(stats.request_start_time, stats.calls)\n if stats.response_status_code != 200:\n self.total_failed_count += 1\n if stats.response_status_code == 429:\n self.throttled_count += 1\n else:\n request_latency = stats.response_end_time - stats.request_start_time\n self.request_latency._append(stats.request_start_time, request_latency)\n if request_latency > self.window_duration:\n logging.warning((\n f\"request completed in {round(request_latency, 2)} seconds, while aggregation-window is {round(self.window_duration, 2)} \"\n \"seconds, consider increasing aggregation-window to at least 2x your typical request latency.\"\n )\n ) \n self.request_timestamps._append(stats.request_start_time, stats.request_start_time)\n self.response_latencies._append(stats.request_start_time, stats.response_time - stats.request_start_time)\n self.first_token_latencies._append(stats.request_start_time, stats.first_token_time - stats.request_start_time)\n self.token_latencies._append(stats.request_start_time, (stats.response_end_time - stats.first_token_time) / stats.generated_tokens)\n self.context_tokens._append(stats.request_start_time, stats.context_tokens)\n self.generated_tokens._append(stats.request_start_time, stats.generated_tokens)\n if stats.deployment_utilization is not None:\n self.utilizations._append(stats.request_start_time, stats.deployment_utilization)\n\n def _dump(self):\n with self.lock:\n run_seconds = round(time.time() - self.start_time)\n timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n e2e_latency_avg = round(np.average(self.request_latency._values()), 3) if self.request_latency._len() > 0 else \"n/a\"\n e2e_latency_95th = round(np.percentile(self.request_latency._values(), 95), 3) if self.request_latency._len() > 1 else \"n/a\"\n context_per_minute = round(60.0 * np.sum(self.context_tokens._values()) / self.window_duration, 0) if self.context_tokens._len() > 0 else \"n/a\"\n gen_per_minute = round(60.0 * np.sum(self.generated_tokens._values()) / self.window_duration, 0) if self.generated_tokens._len() > 0 else \"n/a\"\n tokens_per_minute = 0\n if context_per_minute != \"n/a\":\n tokens_per_minute += context_per_minute\n if gen_per_minute != \"n/a\":\n tokens_per_minute += gen_per_minute\n ttft_avg = round(np.average(self.first_token_latencies._values()), 3) if self.first_token_latencies._len() > 0 else \"n/a\"\n ttft_95th = round(np.percentile(self.first_token_latencies._values(), 95), 3) if self.first_token_latencies._len() > 1 else \"n/a\"\n tbt_avg = round(np.average(self.token_latencies._values()), 3) if self.token_latencies._len() > 0 else \"n/a\"\n tbt_95th = round(np.percentile(self.token_latencies._values(), 95), 3) if self.token_latencies._len() > 1 else \"n/a\"\n util_avg = f\"{round(np.average(self.utilizations._values()), 1)}%\" if self.utilizations._len() > 0 else \"n/a\"\n util_95th = f\"{round(np.percentile(self.utilizations._values(), 95), 1)}%\" if self.utilizations._len() > 1 else \"n/a\"\n rpm = round(60.0 * self.request_timestamps._len() / self.window_duration, 1) if self.request_timestamps._len() > 0 else \"n/a\"\n # Handle the 1x extra processing_request due to next request being queued\n processing_requests_count = min(self.clients, self.processing_requests_count)\n if self.json_output:\n j = {\n \"run_seconds\": run_seconds,\n \"timestamp\": timestamp,\n \"rpm\": rpm,\n \"processing\": processing_requests_count,\n \"completed\": self.total_requests_count,\n \"failures\": self.total_failed_count,\n \"throttled\": self.throttled_count,\n \"requests\": self.total_requests_count,\n \"tpm\": {\n \"context\": context_per_minute,\n \"gen\": gen_per_minute,\n \"total\": tokens_per_minute,\n },\n \"e2e\": {\n \"avg\": e2e_latency_avg,\n \"95th\": e2e_latency_95th,\n },\n \"ttft\": {\n \"avg\": ttft_avg,\n \"95th\": ttft_95th,\n },\n \"tbt\": {\n \"avg\": tbt_avg,\n \"95th\": tbt_95th,\n },\n \"util\": {\n \"avg\": util_avg,\n \"95th\": util_95th,\n },\n }\n print(json.dumps(j), flush=True)\n else:\n print(f\"{timestamp} rpm: {rpm:<5} processing: {processing_requests_count:<4} completed: {self.total_requests_count:<5} failures: {self.total_failed_count:<4} throttled: {self.throttled_count:<4} requests: {self.total_requests_count:<5} tpm: {tokens_per_minute:<6} ttft_avg: {ttft_avg:<6} ttft_95th: {ttft_95th:<6} tbt_avg: {tbt_avg:<6} tbt_95th: {tbt_95th:<6} e2e_avg: {e2e_latency_avg:<6} e2e_95th: {e2e_latency_95th:<6} util_avg: {util_avg:<6} util_95th: {util_95th:<6}\", flush=True)\n\n def _slide_window(self):\n with self.lock:\n self.call_tries._trim_oldest(self.window_duration)\n self.request_timestamps._trim_oldest(self.window_duration)\n self.response_latencies._trim_oldest(self.window_duration)\n self.first_token_latencies._trim_oldest(self.window_duration)\n self.token_latencies._trim_oldest(self.window_duration)\n self.context_tokens._trim_oldest(self.window_duration)\n self.generated_tokens._trim_oldest(self.window_duration)\n self.utilizations._trim_oldest(self.window_duration)"
}
] | import logging
import math
import os
import sys
import time
import aiohttp
import wonderwords
from typing import Iterable, Iterator
from .asynchttpexecuter import AsyncHTTPExecuter
from .oairequester import OAIRequester
from .oaitokenizer import num_tokens_from_messages
from .ratelimiting import NoRateLimiter, RateLimiter
from .statsaggregator import _StatsAggregator | 5,318 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
class _RequestBuilder:
"""
Wrapper iterator class to build request payloads.
"""
def __init__(self, model:str, context_tokens:int,
max_tokens:None,
completions:None,
frequence_penalty:None,
presence_penalty:None,
temperature:None,
top_p:None):
self.model = model
self.context_tokens = context_tokens
self.max_tokens = max_tokens
self.completions = completions
self.frequency_penalty = frequence_penalty
self.presence_penalty = presence_penalty
self.temperature = temperature
self.top_p = top_p
logging.info("warming up prompt cache")
_generate_messages(self.model, self.context_tokens, self.max_tokens)
def __iter__(self) -> Iterator[dict]:
return self
def __next__(self) -> (dict, int):
messages, messages_tokens = _generate_messages(self.model, self.context_tokens, self.max_tokens)
body = {"messages":messages}
if self.max_tokens is not None:
body["max_tokens"] = self.max_tokens
if self.completions is not None:
body["n"] = self.completions
if self.frequency_penalty is not None:
body["frequency_penalty"] = self.frequency_penalty
if self.presence_penalty is not None:
body["presenece_penalty"] = self.presence_penalty
if self.temperature is not None:
body["temperature"] = self.temperature
if self.top_p is not None:
body["top_p"] = self.top_p
return body, messages_tokens
def load(args):
try:
_validate(args)
except ValueError as e:
print(f"invalid argument(s): {e}")
sys.exit(1)
api_key = os.getenv(args.api_key_env)
url = args.api_base_endpoint[0] + "/openai/deployments/" + args.deployment + "/chat/completions"
url += "?api-version=" + args.api_version
rate_limiter = NoRateLimiter()
if args.rate is not None and args.rate > 0:
| # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
class _RequestBuilder:
"""
Wrapper iterator class to build request payloads.
"""
def __init__(self, model:str, context_tokens:int,
max_tokens:None,
completions:None,
frequence_penalty:None,
presence_penalty:None,
temperature:None,
top_p:None):
self.model = model
self.context_tokens = context_tokens
self.max_tokens = max_tokens
self.completions = completions
self.frequency_penalty = frequence_penalty
self.presence_penalty = presence_penalty
self.temperature = temperature
self.top_p = top_p
logging.info("warming up prompt cache")
_generate_messages(self.model, self.context_tokens, self.max_tokens)
def __iter__(self) -> Iterator[dict]:
return self
def __next__(self) -> (dict, int):
messages, messages_tokens = _generate_messages(self.model, self.context_tokens, self.max_tokens)
body = {"messages":messages}
if self.max_tokens is not None:
body["max_tokens"] = self.max_tokens
if self.completions is not None:
body["n"] = self.completions
if self.frequency_penalty is not None:
body["frequency_penalty"] = self.frequency_penalty
if self.presence_penalty is not None:
body["presenece_penalty"] = self.presence_penalty
if self.temperature is not None:
body["temperature"] = self.temperature
if self.top_p is not None:
body["top_p"] = self.top_p
return body, messages_tokens
def load(args):
try:
_validate(args)
except ValueError as e:
print(f"invalid argument(s): {e}")
sys.exit(1)
api_key = os.getenv(args.api_key_env)
url = args.api_base_endpoint[0] + "/openai/deployments/" + args.deployment + "/chat/completions"
url += "?api-version=" + args.api_version
rate_limiter = NoRateLimiter()
if args.rate is not None and args.rate > 0: | rate_limiter = RateLimiter(args.rate, 60) | 4 | 2023-10-19 00:52:26+00:00 | 8k |
pytest-visual/pytest-visual | visual/interface.py | [
{
"identifier": "correct_layout",
"path": "visual/lib/convenience.py",
"snippet": "def correct_layout(image: np.ndarray, layout: str) -> np.ndarray:\n if layout[0] == \"1\":\n image = np.squeeze(image, axis=0)\n layout = layout[1:]\n if layout[0] == \"c\":\n image = np.moveaxis(image, 0, -1)\n layout = layout[1:] + \"c\"\n return image"
},
{
"identifier": "create_plot_from_images",
"path": "visual/lib/convenience.py",
"snippet": "def create_plot_from_images(\n images: List[np.ndarray],\n labels: Optional[List[str]],\n grid_shape: Tuple[int, int],\n height: float,\n) -> Figure:\n if labels is not None:\n assert len(labels) == len(images), \"Number of labels must match number of images\"\n rows, cols = grid_shape\n\n # Create multiple augmented images and add them to a grid\n figure = make_subplots(rows=rows, cols=cols, horizontal_spacing=0.05, vertical_spacing=0.05)\n for r in range(rows):\n for c in range(cols):\n i = r * cols + c\n if i < len(images):\n image = images[i]\n\n # Add image to grid\n subfig = px.imshow(image, binary_string=True)\n figure.update_layout(margin=dict(l=0, r=0, b=0, t=0))\n figure.add_trace(subfig.data[0], row=r + 1, col=c + 1)\n\n # Add label to image\n if labels is not None:\n # Add white background to text\n figure.add_annotation(\n xref=\"paper\",\n yref=\"paper\",\n x=0.5 * image.shape[1],\n y=0.9 * image.shape[0],\n text=labels[i],\n showarrow=False,\n font=dict(size=15, color=\"black\"),\n align=\"center\",\n bgcolor=\"white\",\n row=r + 1,\n col=c + 1,\n )\n\n # Remove axes\n for r in range(rows):\n for c in range(cols):\n i = r * cols + c\n\n if i < len(images):\n figure.update_xaxes(title_text=\"\", showticklabels=False, showgrid=False, row=r + 1, col=c + 1)\n figure.update_yaxes(title_text=\"\", showticklabels=False, showgrid=False, row=r + 1, col=c + 1)\n\n # Set height\n figure.update_layout(height=height)\n\n return figure"
},
{
"identifier": "get_grid_shape",
"path": "visual/lib/convenience.py",
"snippet": "def get_grid_shape(num_images: int, max_cols: int) -> Tuple[int, int]:\n \"\"\"\n Calculate the shape of the grid of images to show.\n \"\"\"\n rows = ceil_division(num_images, max_cols)\n cols = ceil_division(num_images, rows)\n return rows, cols"
},
{
"identifier": "get_image_max_value_from_type",
"path": "visual/lib/convenience.py",
"snippet": "def get_image_max_value_from_type(max_value: Optional[float], image: np.ndarray) -> float:\n \"\"\"\n Get or calculate the maximum value of the image.\n \"\"\"\n if max_value is not None:\n return max_value\n\n if image.dtype in [np.uint8, np.uint16, np.uint32, np.uint64, np.int16, np.int32, np.int64]:\n return 255.0\n if image.dtype in [np.float16, np.float32, np.float64]:\n return 1.0\n raise ValueError(f\"Could not determine max value from image with dtype {image.dtype}\")"
},
{
"identifier": "get_layout_from_image",
"path": "visual/lib/convenience.py",
"snippet": "def get_layout_from_image(layout: Optional[str], image: np.ndarray) -> str:\n \"\"\"\n Get or calculate the layout of the grid of images to show.\n\n Possible values: \"hwc\", \"chw\", \"hw\", \"1chw\", \"1hwc\"\n \"\"\"\n if layout is not None:\n return layout\n\n matched_layouts = [L for L in [\"hwc\", \"chw\", \"hw\", \"1chw\", \"1hwc\"] if layout_matches_image(L, image)]\n assert len(matched_layouts) == 1, f\"Could not determine layout from image with shape {image.shape}\"\n return matched_layouts[0]"
},
{
"identifier": "get_visualization_flags",
"path": "visual/lib/flags.py",
"snippet": "def get_visualization_flags(request: FixtureRequest) -> Tuple[bool, bool, bool]:\n \"\"\"\n Retrieves visualization flags from the pytest command line options. It checks which flags are set and ensures that they are mutually exclusive.\n\n Parameters:\n - request (FixtureRequest): A pytest request object containing configuration details and command line options.\n\n Returns:\n - Tuple[bool, bool, bool]: A tuple containing three boolean values corresponding to whether each flag is set:\n - run_visualization: True if visualization should be run (if --visual or --visual-yes-all is set).\n - yes_all: True if --visual-yes-all flag is set.\n - reset_all: True if --visual-reset-all flag is set.\n\n Raises:\n - AssertionError: If more than one of the flags is set.\n \"\"\"\n\n visualize = bool(request.config.getoption(\"--visual\"))\n yes_all = bool(request.config.getoption(\"--visual-yes-all\"))\n reset_all = bool(request.config.getoption(\"--visual-reset-all\"))\n\n assert visualize + yes_all + reset_all <= 1, \"Only one of --visual, --visual-yes-all, --visual-reset-all can be set\"\n\n run_visualization = visualize or yes_all\n return run_visualization, yes_all, reset_all"
},
{
"identifier": "pytest_addoption",
"path": "visual/lib/flags.py",
"snippet": "def pytest_addoption(parser: Parser):\n parser.addoption(\"--visual\", action=\"store_true\", help=\"Run visualization tests, prompt for acceptance\")\n parser.addoption(\"--visual-yes-all\", action=\"store_true\", help=\"Visualization tests are accepted without prompting\")\n parser.addoption(\"--visual-reset-all\", action=\"store_true\", help=\"Don't visualize, but mark all visualization cases as unaccepted\") # fmt: skip"
},
{
"identifier": "Statement",
"path": "visual/lib/storage.py",
"snippet": "def get_storage_path(request: FixtureRequest) -> Path:\ndef load_statements(storage_path: Path) -> Optional[List[Statement]]:\ndef store_statements(storage_path: Path, statements: List[Statement]) -> None:\ndef clear_statements(storage_path: Path) -> None:"
},
{
"identifier": "UI",
"path": "visual/lib/ui.py",
"snippet": "class UI:\n def __init__(self) -> None:\n \"\"\"\n Initializes the UI object, setting up the app, the layout, and starting a thread to run the server.\n The callbacks for the interactive elements in the UI are also defined within this method.\n \"\"\"\n self.app = Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])\n self.app.layout = self._draw_initial_layout()\n\n self._render_blank()\n\n self.thread = threading.Thread(\n target=self.app.run_server, kwargs={\"debug\": False, \"use_reloader\": False, \"port\": port_number}\n )\n self.thread.daemon = True\n self.thread.start()\n\n @self.app.callback(\n Output(\"accept-button\", \"n_clicks_timestamp\"), # Output is dummy, just to trigger the callback\n Output(\"decline-button\", \"n_clicks_timestamp\"), # Output is dummy, just to trigger the callback\n Input(\"accept-button\", \"n_clicks\"),\n Input(\"decline-button\", \"n_clicks\"),\n )\n def on_button_click(accept_clicks: int, decline_clicks: int) -> tuple:\n \"\"\"\n Callback function that is triggered when either the 'Accept' or 'Decline' button is clicked.\n It modifies a global variable to reflect which button was clicked.\n \"\"\"\n global _global_button_clicked\n\n if ctx.triggered_id == \"accept-button\":\n _global_button_clicked = \"accept\"\n elif ctx.triggered_id == \"decline-button\":\n _global_button_clicked = \"decline\"\n elif ctx.triggered_id is None:\n pass # On reload, no button is clicked\n else:\n # Raised exceptions are not shown in the console, but prints are\n print(f\"Invalid trigger: {ctx.triggered_id}\")\n return None, None\n\n @self.app.callback(\n Output(\"file-name\", \"children\"),\n Output(\"prev-statements\", \"children\"),\n Output(\"curr-statements\", \"children\"),\n Input(\"interval-component\", \"n_intervals\"),\n )\n def update_layout(n_intervals: int):\n \"\"\"\n Callback function that updates the layout at specified intervals.\n This function keeps the UI updated in real-time.\n \"\"\"\n return (\n self.file_name.children, # type: ignore\n self.prev_statements.children,\n self.curr_statements.children,\n )\n\n def teardown(self) -> None:\n \"\"\"\n Renders a blank UI and waits for the layout to update.\n \"\"\"\n self._render_blank()\n time.sleep(finish_delay) # Wait for the layout to update\n\n def _render_blank(self) -> None:\n \"\"\"\n Renders a blank UI, without any statements or buttons.\n \"\"\"\n self._render_location(None)\n self.prev_statements = self._render_statements_in_div([], \"prev-statements\")\n self.curr_statements = self._render_statements_in_div([], \"curr-statements\")\n\n def prompt_user(\n self, location: \"Location\", prev_statements: Optional[List[Statement]], curr_statements: List[Statement]\n ) -> bool:\n \"\"\"\n Prompts the user with statements for review and waits for user interaction (accept/decline).\n \"\"\"\n self._render_location(location)\n\n self.prev_statements = self._render_statements_in_div(prev_statements, \"prev-statements\")\n self.curr_statements = self._render_statements_in_div(curr_statements, \"curr-statements\")\n\n return self._get_accept_decline()\n\n def _draw_initial_layout(self) -> html.Div:\n \"\"\"\n Creates and returns the initial layout of the app, organizing the visual elements.\n \"\"\"\n\n return html.Div(\n [\n dcc.Interval(id=\"interval-component\", interval=update_interval * 1000, n_intervals=0),\n dbc.Card(\n [\n dbc.ListGroup(\n [\n dbc.CardHeader(\"\", id=\"file-name\"),\n dbc.CardGroup(\n [\n dbc.Card(\n [\n dbc.Button(\n \"Decline\",\n style={\n \"backgroundColor\": \"#d9534f\",\n \"margin\": \"10px\",\n \"width\": \"100% - 20px\",\n },\n id=\"decline-button\",\n n_clicks=0,\n ), # fmt: skip\n dbc.CardBody(id=\"prev-statements\"),\n dbc.CardFooter(\"Previously accepted\", style={\"textAlign\": \"center\"}),\n ],\n ),\n dbc.Card(\n [\n dbc.Button(\n \"Accept\",\n style={\n \"backgroundColor\": \"#5cb85c\",\n \"margin\": \"10px\",\n \"width\": \"100% - 20px\",\n },\n id=\"accept-button\",\n n_clicks=0,\n ), # fmt: skip\n dbc.CardBody(id=\"curr-statements\"),\n dbc.CardFooter(\"New\", style={\"textAlign\": \"center\"}),\n ],\n ),\n ],\n style={\"display\": \"flex\", \"margin\": \"10px\"},\n ),\n ],\n flush=True,\n style={\"border\": \"1px solid #ccc\"},\n ),\n ],\n style={\"padding\": \"10px\", \"border\": \"0px\"},\n ),\n ],\n )\n\n def _render_location(self, location: Optional[\"Location\"]) -> None:\n \"\"\"\n Renders the location of the test function into the UI.\n \"\"\"\n if location is None:\n location = Location(\"\", \"\")\n\n element = [\n f\"File: {location.file_name}\",\n html.Br(),\n f\"Function: {location.function_name + '()' if location.function_name != '' else ''}\",\n ]\n self.file_name = dbc.CardHeader(element, id=\"file-name\")\n\n def _render_statements_in_div(self, statements: Optional[List[Statement]], div_id: str) -> dbc.CardBody:\n \"\"\"\n Renders statements into a specified division in the UI.\n Each statement could either be a print statement or a graphical (plotly) figure.\n \"\"\"\n\n code_style = {\n \"background-color\": \"#f8f8f8\",\n \"border\": \"1px solid #999\",\n \"display\": \"block\",\n \"padding\": \"10px\",\n \"border-radius\": \"5px\",\n \"white-space\": \"pre-wrap\",\n \"margin-top\": \"10px\",\n \"margin-bottom\": \"10px\",\n \"font-family\": \"monospace\",\n \"color\": \"black\",\n }\n plot_style = {\"padding\": \"10px\", \"margin-top\": \"10px\", \"margin-bottom\": \"10px\"}\n\n rendered_statements: list = []\n if statements is None:\n rendered_statements.append(html.P(\"Nothing to show\"))\n else:\n for cmd, contents in statements:\n if cmd == \"print\":\n rendered_statements.append(html.Code(contents, style=code_style))\n elif cmd == \"show\":\n figure = plotly.io.from_json(contents)\n rendered_statements.append(dbc.Card(dcc.Graph(figure=figure), style=plot_style))\n else:\n raise ValueError(f\"Invalid command {cmd}\")\n\n div = dbc.CardBody(rendered_statements, id=div_id)\n return div\n\n def _get_accept_decline(self) -> bool:\n \"\"\"\n Waits for the user to click either 'Accept' or 'Decline', and returns a boolean value reflecting the choice.\n \"\"\"\n global _global_button_clicked\n\n while True:\n if _global_button_clicked is not None:\n if _global_button_clicked == \"accept\":\n _global_button_clicked = None\n return True\n elif _global_button_clicked == \"decline\":\n _global_button_clicked = None\n return False\n else:\n _global_button_clicked = None\n raise ValueError(\"Invalid button clicked value\")\n else:\n time.sleep(accept_decline_polling_interval)"
},
{
"identifier": "Location",
"path": "visual/lib/ui.py",
"snippet": "class Location:\n def __init__(self, file_name: str, function_name: str) -> None:\n self.file_name = file_name\n self.function_name = function_name"
},
{
"identifier": "visual_UI",
"path": "visual/lib/ui.py",
"snippet": "@pytest.fixture(scope=\"session\")\ndef visual_UI(request: FixtureRequest) -> Generator[Optional[\"UI\"], None, None]:\n \"\"\"\n A pytest fixture that conditionally sets up and tears down a UI object for visualization purposes during testing.\n\n The decision to yield a UI object or None is based on flags obtained from the test session configuration.\n The UI object, if created, runs a Dash server in a separate thread for the duration of the test session.\n \"\"\"\n run_visualization, yes_all, reset_all = get_visualization_flags(request)\n if run_visualization: # Yield a UI object\n ui = UI()\n yield ui\n ui.teardown()\n else: # No visualizations will be shown, so no need to start the heavy Dash server\n yield None"
}
] | import os
import random
import tempfile
import numpy as np
import pytest
import torchview # isort: skip
import numpy as np
import torch
import tensorflow as tf
from typing import Generator, List, Optional
from _pytest.fixtures import FixtureRequest
from PIL import Image
from plotly.graph_objs import Figure
from visual.lib.convenience import (
correct_layout,
create_plot_from_images,
get_grid_shape,
get_image_max_value_from_type,
get_layout_from_image,
)
from visual.lib.flags import get_visualization_flags, pytest_addoption
from visual.lib.storage import (
Statement,
clear_statements,
get_storage_path,
load_statements,
store_statements,
)
from visual.lib.ui import UI, Location, visual_UI | 4,711 | """
Show text within a visualization case.
Parameters:
- text (str): The text to show.
"""
self.statements.append(["print", text])
def show_figure(self, figure: Figure) -> None:
"""
Show a plotly figure within a visualization case.
Parameters:
- fig (Figure): The figure to show.
"""
self.statements.append(["show", str(figure.to_json())])
# Convenience interface
def show_images(
self,
images: List[np.ndarray],
labels: Optional[List[str]] = None,
max_cols: int = 3,
height_per_row: float = 300,
) -> None:
"""
Convenience method to show a grid of images. Only accepts standardized numpy images.
Parameters:
- images (List[np.ndarray]): A list of images to show.
- labels (Optional[List[str]]): A list of labels for each image.
- max_cols (int): Maximum number of columns in the grid.
- height_per_row (float): The height of each row in the grid.
"""
assert all(isinstance(image, np.ndarray) for image in images), "Images must be numpy arrays"
assert len(images) > 0, "At least one image must be specified"
grid_shape = get_grid_shape(len(images), max_cols)
total_height = None if height_per_row is None else height_per_row * grid_shape[0]
figure = create_plot_from_images(images, labels, grid_shape, total_height)
self.show_figure(figure)
def show_image(
self,
image: np.ndarray,
label: Optional[str] = None,
height: float = 600,
) -> None:
"""
Convenience method to show a single image. Only accepts standardized numpy images.
Parameters:
- image (np.ndarray): The image to show.
- label (Optional[str]): A label for the image.
- height (float): The height of the image.
"""
labels = None if label is None else [label]
self.show_images([image], labels, max_cols=1, height_per_row=height)
def show_model(
self,
model,
input_size,
depth: int = 100,
height: float = 1500,
) -> None:
"""
Convenience method to show a PyTorch model. Requires the torchview package.
Parameters:
- model (torch.nn.Module): The model to show.
- input_size (Tuple[int, ...]): The input size of the model.
- depth (int): The maximum depth of the model to show.
- height (float): The height of the image.
"""
plot = torchview.draw_graph(model, input_size=input_size, depth=depth)
# Create temporary file path
tempfile_path = tempfile.mktemp()
plot.visual_graph.render(tempfile_path, format="png")
# Read image and show
image = np.array(Image.open(tempfile_path + ".png"))
self.show_image(image, height=height)
# Remove temporary file
os.remove(tempfile_path)
os.remove(tempfile_path + ".png")
@pytest.fixture
def visual(request: FixtureRequest, visual_UI: UI) -> Generator[VisualFixture, None, None]:
"""
A pytest fixture that manages the visualization process during test execution.
Parameters:
- request (FixtureRequest): The current pytest request.
- visual_UI (UI): An instance of the UI class for user interaction.
Yields:
- VisualFixture: An object to collect visualization statements.
"""
run_visualization, yes_all, reset_all = get_visualization_flags(request)
visualizer = VisualFixture()
storage_path = get_storage_path(request)
if run_visualization:
failed_tests1 = request.session.testsfailed
yield visualizer # Run test
failed_tests2 = request.session.testsfailed
if failed_tests2 > failed_tests1:
return # Test failed, so no visualization
statements = visualizer.statements
if not yes_all:
# Read previous statements
|
class VisualFixture:
def __init__(self):
"""
An object to collect visualization statements.
"""
self.statements: List[Statement] = []
# Core interface
def print(self, text: str) -> None:
"""
Show text within a visualization case.
Parameters:
- text (str): The text to show.
"""
self.statements.append(["print", text])
def show_figure(self, figure: Figure) -> None:
"""
Show a plotly figure within a visualization case.
Parameters:
- fig (Figure): The figure to show.
"""
self.statements.append(["show", str(figure.to_json())])
# Convenience interface
def show_images(
self,
images: List[np.ndarray],
labels: Optional[List[str]] = None,
max_cols: int = 3,
height_per_row: float = 300,
) -> None:
"""
Convenience method to show a grid of images. Only accepts standardized numpy images.
Parameters:
- images (List[np.ndarray]): A list of images to show.
- labels (Optional[List[str]]): A list of labels for each image.
- max_cols (int): Maximum number of columns in the grid.
- height_per_row (float): The height of each row in the grid.
"""
assert all(isinstance(image, np.ndarray) for image in images), "Images must be numpy arrays"
assert len(images) > 0, "At least one image must be specified"
grid_shape = get_grid_shape(len(images), max_cols)
total_height = None if height_per_row is None else height_per_row * grid_shape[0]
figure = create_plot_from_images(images, labels, grid_shape, total_height)
self.show_figure(figure)
def show_image(
self,
image: np.ndarray,
label: Optional[str] = None,
height: float = 600,
) -> None:
"""
Convenience method to show a single image. Only accepts standardized numpy images.
Parameters:
- image (np.ndarray): The image to show.
- label (Optional[str]): A label for the image.
- height (float): The height of the image.
"""
labels = None if label is None else [label]
self.show_images([image], labels, max_cols=1, height_per_row=height)
def show_model(
self,
model,
input_size,
depth: int = 100,
height: float = 1500,
) -> None:
"""
Convenience method to show a PyTorch model. Requires the torchview package.
Parameters:
- model (torch.nn.Module): The model to show.
- input_size (Tuple[int, ...]): The input size of the model.
- depth (int): The maximum depth of the model to show.
- height (float): The height of the image.
"""
plot = torchview.draw_graph(model, input_size=input_size, depth=depth)
# Create temporary file path
tempfile_path = tempfile.mktemp()
plot.visual_graph.render(tempfile_path, format="png")
# Read image and show
image = np.array(Image.open(tempfile_path + ".png"))
self.show_image(image, height=height)
# Remove temporary file
os.remove(tempfile_path)
os.remove(tempfile_path + ".png")
@pytest.fixture
def visual(request: FixtureRequest, visual_UI: UI) -> Generator[VisualFixture, None, None]:
"""
A pytest fixture that manages the visualization process during test execution.
Parameters:
- request (FixtureRequest): The current pytest request.
- visual_UI (UI): An instance of the UI class for user interaction.
Yields:
- VisualFixture: An object to collect visualization statements.
"""
run_visualization, yes_all, reset_all = get_visualization_flags(request)
visualizer = VisualFixture()
storage_path = get_storage_path(request)
if run_visualization:
failed_tests1 = request.session.testsfailed
yield visualizer # Run test
failed_tests2 = request.session.testsfailed
if failed_tests2 > failed_tests1:
return # Test failed, so no visualization
statements = visualizer.statements
if not yes_all:
# Read previous statements | location = Location(request.node.module.__file__, request.node.name) # type: ignore | 9 | 2023-10-18 07:13:37+00:00 | 8k |
SLDGroup/G-CASCADE | test_ISIC2018.py | [
{
"identifier": "PVT_GCASCADE",
"path": "lib/networks.py",
"snippet": "class PVT_GCASCADE(nn.Module):\r\n def __init__(self, n_class=1, img_size=224, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', skip_aggregation='additive'):\r\n super(PVT_GCASCADE, self).__init__()\r\n\r\n self.skip_aggregation = skip_aggregation\r\n self.n_class = n_class\r\n \r\n # conv block to convert single channel to 3 channels\r\n self.conv_1cto3c = nn.Sequential(\r\n nn.Conv2d(1, 3, kernel_size=1),\r\n nn.BatchNorm2d(3),\r\n nn.ReLU(inplace=True)\r\n )\r\n \r\n # backbone network initialization with pretrained weight\r\n self.backbone = pvt_v2_b2() # [64, 128, 320, 512]\r\n path = './pretrained_pth/pvt/pvt_v2_b2.pth'\r\n save_model = torch.load(path)\r\n model_dict = self.backbone.state_dict()\r\n state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()}\r\n model_dict.update(state_dict)\r\n self.backbone.load_state_dict(model_dict)\r\n \r\n self.channels = [512, 320, 128, 64]\r\n \r\n # decoder initialization\r\n if self.skip_aggregation == 'additive':\r\n self.decoder = GCASCADE(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation)\r\n elif self.skip_aggregation == 'concatenation':\r\n self.decoder = GCASCADE_Cat(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation)\r\n self.channels = [self.channels[0], self.channels[1]*2, self.channels[2]*2, self.channels[3]*2]\r\n else:\r\n print('No implementation found for the skip_aggregation ' + self.skip_aggregation + '. Continuing with the default additive aggregation.')\r\n self.decoder = GCASCADE(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation)\r\n\r\n print('Model %s created, param count: %d' %\r\n ('GCASCADE decoder: ', sum([m.numel() for m in self.decoder.parameters()])))\r\n \r\n # Prediction heads initialization\r\n self.out_head1 = nn.Conv2d(self.channels[0], self.n_class, 1)\r\n self.out_head2 = nn.Conv2d(self.channels[1], self.n_class, 1)\r\n self.out_head3 = nn.Conv2d(self.channels[2], self.n_class, 1)\r\n self.out_head4 = nn.Conv2d(self.channels[3], self.n_class, 1)\r\n \r\n\r\n def forward(self, x):\r\n \r\n # if grayscale input, convert to 3 channels\r\n if x.size()[1] == 1:\r\n x = self.conv_1cto3c(x)\r\n \r\n # transformer backbone as encoder\r\n x1, x2, x3, x4 = self.backbone(x)\r\n \r\n # decoder\r\n x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1])\r\n \r\n # prediction heads \r\n p1 = self.out_head1(x1_o)\r\n p2 = self.out_head2(x2_o)\r\n p3 = self.out_head3(x3_o)\r\n p4 = self.out_head4(x4_o)\r\n \r\n p1 = F.interpolate(p1, scale_factor=32, mode='bilinear')\r\n p2 = F.interpolate(p2, scale_factor=16, mode='bilinear')\r\n p3 = F.interpolate(p3, scale_factor=8, mode='bilinear')\r\n p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') \r\n return p1, p2, p3, p4\r"
},
{
"identifier": "MERIT_GCASCADE",
"path": "lib/networks.py",
"snippet": "class MERIT_GCASCADE(nn.Module):\r\n def __init__(self, n_class=1, img_size_s1=(256,256), img_size_s2=(224,224), k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', interpolation='bilinear', skip_aggregation='additive'):\r\n super(MERIT_GCASCADE, self).__init__()\r\n \r\n self.interpolation = interpolation\r\n self.img_size_s1 = img_size_s1\r\n self.img_size_s2 = img_size_s2\r\n self.skip_aggregation = skip_aggregation\r\n self.n_class = n_class\r\n \r\n # conv block to convert single channel to 3 channels\r\n self.conv_1cto3c = nn.Sequential(\r\n nn.Conv2d(1, 3, kernel_size=1),\r\n nn.BatchNorm2d(3),\r\n nn.ReLU(inplace=True)\r\n )\r\n \r\n # backbone network initialization with pretrained weight\r\n self.backbone1 = maxxvit_rmlp_small_rw_256_4out() # [64, 128, 320, 512]\r\n self.backbone2 = maxvit_rmlp_small_rw_224_4out() # [64, 128, 320, 512]\r\n \r\n print('Loading:', './pretrained_pth/maxvit/maxxvit_rmlp_small_rw_256_sw-37e217ff.pth')\r\n state_dict1 = torch.load('./pretrained_pth/maxvit/maxxvit_rmlp_small_rw_256_sw-37e217ff.pth') \r\n self.backbone1.load_state_dict(state_dict1, strict=False)\r\n \r\n print('Loading:', './pretrained_pth/maxvit/maxvit_rmlp_small_rw_224_sw-6ef0ae4f.pth') \r\n state_dict2 = torch.load('./pretrained_pth/maxvit/maxvit_rmlp_small_rw_224_sw-6ef0ae4f.pth') \r\n self.backbone2.load_state_dict(state_dict2, strict=False)\r\n \r\n print('Pretrain weights loaded.')\r\n \r\n self.channels = [768, 384, 192, 96]\r\n \r\n # decoder initialization \r\n if self.skip_aggregation == 'additive':\r\n self.decoder1 = GCASCADE(channels=self.channels, img_size=img_size_s1[0], k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation)\r\n self.decoder2 = GCASCADE(channels=self.channels, img_size=img_size_s2[0], k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation)\r\n elif self.skip_aggregation == 'concatenation':\r\n self.decoder1 = GCASCADE_Cat(channels=self.channels, img_size=img_size_s1[0], k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation)\r\n self.decoder2 = GCASCADE_Cat(channels=self.channels, img_size=img_size_s2[0], k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation)\r\n self.channels = [self.channels[0], self.channels[1]*2, self.channels[2]*2, self.channels[3]*2]\r\n else:\r\n print('No implementation found for the skip_aggregation ' + self.skip_aggregation + '. Continuing with the default additive aggregation.')\r\n self.decoder1 = GCASCADE(channels=self.channels, img_size=img_size_s1[0], k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation)\r\n self.decoder2 = GCASCADE(channels=self.channels, img_size=img_size_s2[0], k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation)\r\n \r\n print('Model %s created, param count: %d' %\r\n ('GCASCADE decoder: ', sum([m.numel() for m in self.decoder1.parameters()])))\r\n print('Model %s created, param count: %d' %\r\n ('GCASCADE decoder: ', sum([m.numel() for m in self.decoder2.parameters()])))\r\n \r\n # Prediction heads initialization\r\n self.out_head1 = nn.Conv2d(self.channels[0], n_class, 1)\r\n self.out_head2 = nn.Conv2d(self.channels[1], n_class, 1)\r\n self.out_head3 = nn.Conv2d(self.channels[2], n_class, 1)\r\n self.out_head4 = nn.Conv2d(self.channels[3], n_class, 1)\r\n\r\n self.out_head4_in = nn.Conv2d(self.channels[3], 1, 1)\r\n self.sigmoid = nn.Sigmoid()\r\n\r\n def forward(self, x):\r\n \r\n # if grayscale input, convert to 3 channels\r\n if x.size()[1] == 1:\r\n x = self.conv_1cto3c(x)\r\n \r\n # transformer backbone as encoder\r\n f1 = self.backbone1(F.interpolate(x, size=self.img_size_s1, mode=self.interpolation)) \r\n #print([f1[3].shape,f1[2].shape,f1[1].shape,f1[0].shape])\r\n \r\n # decoder\r\n x11_o, x12_o, x13_o, x14_o = self.decoder1(f1[3], [f1[2], f1[1], f1[0]])\r\n\r\n # prediction heads \r\n p11 = self.out_head1(x11_o)\r\n p12 = self.out_head2(x12_o)\r\n p13 = self.out_head3(x13_o)\r\n p14 = self.out_head4(x14_o)\r\n\r\n p14_in = self.out_head4_in(x14_o)\r\n p14_in = self.sigmoid(p14_in)\r\n \r\n\r\n p11 = F.interpolate(p11, scale_factor=32, mode=self.interpolation)\r\n p12 = F.interpolate(p12, scale_factor=16, mode=self.interpolation)\r\n p13 = F.interpolate(p13, scale_factor=8, mode=self.interpolation)\r\n p14 = F.interpolate(p14, scale_factor=4, mode=self.interpolation)\r\n\r\n p14_in = F.interpolate(p14_in, scale_factor=4, mode=self.interpolation) \r\n x_in = x * p14_in\r\n \r\n f2 = self.backbone2(F.interpolate(x_in, size=self.img_size_s2, mode=self.interpolation))\r\n \r\n skip1_0 = F.interpolate(f1[0], size=(f2[0].shape[-2:]), mode=self.interpolation)\r\n skip1_1 = F.interpolate(f1[1], size=(f2[1].shape[-2:]), mode=self.interpolation)\r\n skip1_2 = F.interpolate(f1[2], size=(f2[2].shape[-2:]), mode=self.interpolation)\r\n skip1_3 = F.interpolate(f1[3], size=(f2[3].shape[-2:]), mode=self.interpolation)\r\n \r\n x21_o, x22_o, x23_o, x24_o = self.decoder2(f2[3]+skip1_3, [f2[2]+skip1_2, f2[1]+skip1_1, f2[0]+skip1_0])\r\n\r\n p21 = self.out_head1(x21_o)\r\n p22 = self.out_head2(x22_o)\r\n p23 = self.out_head3(x23_o)\r\n p24 = self.out_head4(x24_o)\r\n\r\n #print([p21.shape,p22.shape,p23.shape,p24.shape])\r\n \r\n p21 = F.interpolate(p21, size=(p11.shape[-2:]), mode=self.interpolation)\r\n p22 = F.interpolate(p22, size=(p12.shape[-2:]), mode=self.interpolation)\r\n p23 = F.interpolate(p23, size=(p13.shape[-2:]), mode=self.interpolation)\r\n p24 = F.interpolate(p24, size=(p14.shape[-2:]), mode=self.interpolation)\r\n \r\n p1 = p11 + p21\r\n p2 = p12 + p22\r\n p3 = p13 + p23\r\n p4 = p14 + p24\r\n #print([p1.shape,p2.shape,p3.shape,p4.shape])\r\n return p1, p2, p3, p4\r"
},
{
"identifier": "test_dataset",
"path": "utils/dataloader.py",
"snippet": "class test_dataset:\n def __init__(self, image_root, gt_root, testsize):\n self.testsize = testsize\n self.images = [image_root + f for f in os.listdir(image_root) if f.endswith('.jpg') or f.endswith('.png')]\n self.gts = [gt_root + f for f in os.listdir(gt_root) if f.endswith('.tif') or f.endswith('.png' or f.endswith('.jpg'))]\n self.images = sorted(self.images)\n self.gts = sorted(self.gts)\n self.transform = transforms.Compose([\n transforms.Resize((self.testsize, self.testsize)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n self.gt_transform = transforms.ToTensor()\n self.size = len(self.images)\n self.index = 0\n\n def load_data(self):\n image = self.rgb_loader(self.images[self.index])\n image = self.transform(image).unsqueeze(0)\n gt = self.binary_loader(self.gts[self.index])\n name = self.images[self.index].split('/')[-1]\n if name.endswith('.jpg'):\n name = name.split('.jpg')[0] + '.png'\n self.index += 1\n return image, gt, name\n\n def rgb_loader(self, path):\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert('RGB')\n\n def binary_loader(self, path):\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert('L')"
}
] | import torch
import torch.nn.functional as F
import numpy as np
import os, argparse
import sys
import cv2
import pandas as pd
from scipy import misc
from lib.networks import PVT_GCASCADE, MERIT_GCASCADE
from utils.dataloader import test_dataset | 4,902 |
jacard /= len(Y_test)
dice /= len(Y_test)
tanimoto /= len(Y_test)
return jacard, dice, tanimoto
def confusion_matrix_scorer(Y, Y_pred):
Y = Y.astype(np.int8)
Y_pred = Y_pred.astype(np.int8)
P = len(np.where(Y == 1)[0])
N = len(np.where(Y == 0)[0])
#print([P, N])
FP = len(np.where(Y - Y_pred == -1)[0])
FN = len(np.where(Y - Y_pred == 1)[0])
TP = len(np.where(Y + Y_pred ==2)[0])
TN = len(np.where(Y + Y_pred == 0)[0])
return P, N, TN, FP, FN, TP
def get_metrics(Y, pred):
Y = np.reshape(Y, pred.shape)
smooth = 1e-15
P = 0
N = 0
TN = 0
FP = 0
FN = 0
TP = 0
sensitivity = 0
specificity = 0
accuracy = 0
precision = 0
F1 = 0
MCC = 0
for i in range(len(Y)):
_p, _n, _tn, _fp, _fn, _tp = confusion_matrix_scorer(Y[i], pred[i])
P += _p
N += _n
TN += _tn
FP += _fp
FN += _fn
TP += _tp
if (np.sum(Y[i])==0) and (np.sum(pred[i])==0):
sensitivity += 1
specificity += 1
precision += 1
F1 += 1
MCC += 1
else:
if(_tp == 0):
sensitivity += 0
precision += 0
F1 += 0.0
else:
sensitivity += (_tp / (_tp + _fn))
precision += (_tp / (_tp + _fp))
F1 += (2 * ((_tp / (_tp + _fp)) * (_tp / (_tp + _fn))) / ((_tp / (_tp + _fp)) + (_tp / (_tp + _fn))))
if(_tn == 0):
specificity += 0
else:
specificity += (_tn / (_tn + _fp))
MCC += (_tp*_tn - _fp*_fn + smooth)/(np.power((_tp+_fp)*(_tp+_fn)*(_tn+_fp)*(_tn+_fn), 0.5) + smooth)
accuracy += ((_tp + _tn)/(_tp + _fn + _fp + _tn))
return P, N, TN, FP, FN, TP, sensitivity/len(Y), specificity/len(Y), accuracy/len(Y), precision/len(Y), F1/len(Y), MCC/len(Y)
def get_metrics_and_print(Y, yp, method = "PVT-GCASCADE", testset = 'test', threshold = 0.5, show = False, write = False):
rs = []
#yp = preds_test >= threshold #np.round(preds_test,0)
P, N, TN, FP, FN, TP, sensitivity, specificity, accuracy, precision, f1, mcc_cal = get_metrics(Y, yp)
jacard, dice, tanimoto = calculate_metrics(Y, yp)
cmat = [[TN, FP], [FN, TP]]
cmat_score = [[TN/N, FP/N], [FN/P, TP/P]]
print(cmat)
if show:
plt.figure(figsize = (6,6))
sns.heatmap(cmat_score, cmap="Reds", annot=True, fmt = '.2%', square=1, linewidth=2.) #cmat/np.sum(cmat)
plt.xlabel("Predictions")
plt.ylabel("True values")
plt.show()
print("Sensitivity: ", sensitivity)
print("Specificity: ", specificity)
print("Accuracy: ", accuracy)
print("Precision: ", precision)
print("Recall: ", sensitivity)
print("F1 Score: ", f1)
print("MCC: ",mcc_cal)
print('Dice: ', dice)
print('Jacard: ', jacard)
print('Tanimoto: ', tanimoto)
if(write):
results = pd.DataFrame([[method, TN, FP, FN, TP, jacard,
dice, sensitivity, specificity,
accuracy, precision, f1, mcc_cal]],
columns=['Method', 'TN', 'FP', 'FN', 'TP', 'mIoU/Jacard', 'DICE',
'Sensitivity/Recall', 'Specificity', 'Accuracy', 'Precision',
'F-score', 'MCC'])
results.to_csv('results_' + testset + '.csv', mode='a', index=False, header=False)
if __name__ == '__main__':
method_name = 'ISIC2018_811_PVT_GCASCADE_img_size384bs4_Run1'
parser = argparse.ArgumentParser()
parser.add_argument('--encoder', type=str, default='PVT', help='Name of encoder: PVT or MERIT')
parser.add_argument('--skip_aggregation', type=str, default='additive', help='Type of skip-aggregation: additive or concatenation')
parser.add_argument('--testsize', type=int, default=384, help='testing size')
parser.add_argument('--pth_path', type=str, default='./model_pth/'+method_name+'/'+method_name+'.pth')
opt = parser.parse_args()
#torch.cuda.set_device(0) # set your gpu device
if opt.encoder=='PVT':
|
def calculate_metrics(Y_test, yp):
jacard = 0
dice = 0
tanimoto = 0
smooth = 1e-15
for i in range(len(Y_test)):
yp_2 = yp[i].ravel()
y2 = Y_test[i].ravel()
intersection = yp_2 * y2
union = yp_2 + y2 - intersection
only_neg = y2 * (1-yp_2)
only_pos = (1-y2)*yp_2
if (np.sum(y2)==0) and (np.sum(yp_2)==0):
tanimoto += 1.0
jacard += 1.0
dice += 1.0
elif(np.sum(intersection)==0):
tanimoto += 0.0
jacard += 0.0
dice += 0.0
else:
tanimoto += ((np.sum(intersection) + smooth)/(np.sum(intersection) + np.sum(only_neg) + np.sum(only_pos) + smooth))
jacard += ((np.sum(intersection) + smooth)/(np.sum(union) + smooth))
dice += (2. * np.sum(intersection) + smooth) / (np.sum(yp_2) + np.sum(y2) + smooth)
jacard /= len(Y_test)
dice /= len(Y_test)
tanimoto /= len(Y_test)
return jacard, dice, tanimoto
def confusion_matrix_scorer(Y, Y_pred):
Y = Y.astype(np.int8)
Y_pred = Y_pred.astype(np.int8)
P = len(np.where(Y == 1)[0])
N = len(np.where(Y == 0)[0])
#print([P, N])
FP = len(np.where(Y - Y_pred == -1)[0])
FN = len(np.where(Y - Y_pred == 1)[0])
TP = len(np.where(Y + Y_pred ==2)[0])
TN = len(np.where(Y + Y_pred == 0)[0])
return P, N, TN, FP, FN, TP
def get_metrics(Y, pred):
Y = np.reshape(Y, pred.shape)
smooth = 1e-15
P = 0
N = 0
TN = 0
FP = 0
FN = 0
TP = 0
sensitivity = 0
specificity = 0
accuracy = 0
precision = 0
F1 = 0
MCC = 0
for i in range(len(Y)):
_p, _n, _tn, _fp, _fn, _tp = confusion_matrix_scorer(Y[i], pred[i])
P += _p
N += _n
TN += _tn
FP += _fp
FN += _fn
TP += _tp
if (np.sum(Y[i])==0) and (np.sum(pred[i])==0):
sensitivity += 1
specificity += 1
precision += 1
F1 += 1
MCC += 1
else:
if(_tp == 0):
sensitivity += 0
precision += 0
F1 += 0.0
else:
sensitivity += (_tp / (_tp + _fn))
precision += (_tp / (_tp + _fp))
F1 += (2 * ((_tp / (_tp + _fp)) * (_tp / (_tp + _fn))) / ((_tp / (_tp + _fp)) + (_tp / (_tp + _fn))))
if(_tn == 0):
specificity += 0
else:
specificity += (_tn / (_tn + _fp))
MCC += (_tp*_tn - _fp*_fn + smooth)/(np.power((_tp+_fp)*(_tp+_fn)*(_tn+_fp)*(_tn+_fn), 0.5) + smooth)
accuracy += ((_tp + _tn)/(_tp + _fn + _fp + _tn))
return P, N, TN, FP, FN, TP, sensitivity/len(Y), specificity/len(Y), accuracy/len(Y), precision/len(Y), F1/len(Y), MCC/len(Y)
def get_metrics_and_print(Y, yp, method = "PVT-GCASCADE", testset = 'test', threshold = 0.5, show = False, write = False):
rs = []
#yp = preds_test >= threshold #np.round(preds_test,0)
P, N, TN, FP, FN, TP, sensitivity, specificity, accuracy, precision, f1, mcc_cal = get_metrics(Y, yp)
jacard, dice, tanimoto = calculate_metrics(Y, yp)
cmat = [[TN, FP], [FN, TP]]
cmat_score = [[TN/N, FP/N], [FN/P, TP/P]]
print(cmat)
if show:
plt.figure(figsize = (6,6))
sns.heatmap(cmat_score, cmap="Reds", annot=True, fmt = '.2%', square=1, linewidth=2.) #cmat/np.sum(cmat)
plt.xlabel("Predictions")
plt.ylabel("True values")
plt.show()
print("Sensitivity: ", sensitivity)
print("Specificity: ", specificity)
print("Accuracy: ", accuracy)
print("Precision: ", precision)
print("Recall: ", sensitivity)
print("F1 Score: ", f1)
print("MCC: ",mcc_cal)
print('Dice: ', dice)
print('Jacard: ', jacard)
print('Tanimoto: ', tanimoto)
if(write):
results = pd.DataFrame([[method, TN, FP, FN, TP, jacard,
dice, sensitivity, specificity,
accuracy, precision, f1, mcc_cal]],
columns=['Method', 'TN', 'FP', 'FN', 'TP', 'mIoU/Jacard', 'DICE',
'Sensitivity/Recall', 'Specificity', 'Accuracy', 'Precision',
'F-score', 'MCC'])
results.to_csv('results_' + testset + '.csv', mode='a', index=False, header=False)
if __name__ == '__main__':
method_name = 'ISIC2018_811_PVT_GCASCADE_img_size384bs4_Run1'
parser = argparse.ArgumentParser()
parser.add_argument('--encoder', type=str, default='PVT', help='Name of encoder: PVT or MERIT')
parser.add_argument('--skip_aggregation', type=str, default='additive', help='Type of skip-aggregation: additive or concatenation')
parser.add_argument('--testsize', type=int, default=384, help='testing size')
parser.add_argument('--pth_path', type=str, default='./model_pth/'+method_name+'/'+method_name+'.pth')
opt = parser.parse_args()
#torch.cuda.set_device(0) # set your gpu device
if opt.encoder=='PVT': | model = PVT_GCASCADE(n_class=1, img_size=opt.img_size, k=11, padding=5, conv='mr', gcb_act='gelu', skip_aggregation=opt.skip_aggregation) | 0 | 2023-10-24 17:49:10+00:00 | 8k |
StackTipsLab/bloggy | bloggy/models/post.py | [
{
"identifier": "settings",
"path": "bloggy/settings.py",
"snippet": "BASE_DIR = Path(__file__).resolve().parent.parent\nSECRET_KEY = os.getenv(\"SECRET_KEY\", get_random_secret_key())\nDEBUG = os.getenv(\"DEBUG\", \"False\") == \"True\"\nALLOWED_HOSTS = os.getenv(\"ALLOWED_HOSTS\", \"127.0.0.1, localhost\").split(\",\")\nINTERNAL_IPS = ['127.0.0.1']\nSITE_URL = os.getenv(\"SITE_URL\")\nINSTALLED_APPS = [\n 'bloggy',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n # sitemap\n 'django.contrib.sitemaps',\n\n # 'tinymce',\n 'widget_tweaks',\n 'django_summernote',\n 'whitenoise.runserver_nostatic',\n\n 'rest_framework',\n 'bloggy_api',\n 'mail_templated', # Used for templated email https://github.com/artemrizhov/django-mail-templated\n 'storages',\n 'debug_toolbar', # dev only\n\n 'hitcount',\n 'colorfield'\n]\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.gzip.GZipMiddleware',\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'bloggy.middleware.slash_middleware.AppendOrRemoveSlashMiddleware', # Remove slash from url\n\n # Cache\n 'django.middleware.cache.UpdateCacheMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.cache.FetchFromCacheMiddleware',\n # Cache\n\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n\n # Social login\n # 'social_django.middleware.SocialAuthExceptionMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n 'bloggy.middleware.redirect.RedirectMiddleware', # new articles mismatch url redirect\n]\nROOT_URLCONF = 'bloggy.urls'\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': os.path.join(BASE_DIR, '/bloggy/templates'),\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'bloggy.context_processors.seo_attrs',\n 'bloggy.context_processors.app_settings',\n\n # Social login\n # 'social_django.context_processors.backends',\n # 'social_django.context_processors.login_redirect',\n ],\n },\n },\n]\nWSGI_APPLICATION = 'bloggy.wsgi.application'\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': os.getenv('DB_NAME'),\n 'USER': os.getenv('DB_USER'),\n 'PASSWORD': os.getenv('DB_PASSWORD'),\n 'HOST': os.getenv('DB_HOST'),\n 'PORT': os.getenv('DB_PORT'),\n 'OPTIONS': {'charset': 'utf8mb4', 'use_unicode': True},\n }\n}\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nDEFAULT_AUTO_FIELD = 'django.db.models.AutoField'\nSTATIC_URL = '/static/'\nUSE_SPACES = os.getenv('USE_SPACES') == 'True'\nAWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')\nAWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')\nAWS_STORAGE_BUCKET_NAME = os.getenv('AWS_STORAGE_BUCKET_NAME')\nAWS_S3_ENDPOINT_URL = f'https://{os.getenv(\"AWS_S3_ENDPOINT_URL\")}'\n AWS_DEFAULT_ACL = 'public-read'\n AWS_QUERYSTRING_AUTH = False\n AWS_S3_OBJECT_PARAMETERS = {'CacheControl': 'max-age=86400'}\n AWS_LOCATION = 'static'\n STATIC_URL = f'{os.getenv(\"ASSETS_DOMAIN\")}/static/'\n STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'\n MEDIA_URL = '/media/'\n DEFAULT_FILE_STORAGE = 'bloggy.storage_backends.PublicMediaStorage'\n PRIVATE_MEDIA_LOCATION = 'private'\n PRIVATE_FILE_STORAGE = 'bloggy.storage_backends.PrivateMediaStorage'\n AWS_S3_CUSTOM_DOMAIN = 'media.stacktips.com'\n STATIC_URL = '/static/'\n STATIC_ROOT = os.path.join(BASE_DIR, 'bloggy/static')\n MEDIA_URL = '/media/'\n MEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nTINYMCE_DEFAULT_CONFIG = {\n 'plugins': 'code',\n 'toolbar': 'code',\n}\nLOGIN_URL = 'login'\nLOGOUT_URL = 'logout'\nLOGIN_REDIRECT_URL = '/'\nLOGOUT_REDIRECT_URL = '/'\nAUTH_USER_MODEL = 'bloggy.User'\nAUTH_USER_DEFAULT_GROUP = 'bloggy-members'\nSUMMERNOTE_THEME = 'bs4'\nSUMMERNOTE_CONFIG = {\n 'iframe': True,\n 'summernote': {\n 'width': '1000',\n 'height': '720',\n 'styleTags': [\n 'p',\n {\n 'title': 'Blockquote',\n 'tag': 'blockquote',\n 'className': 'blockquote',\n 'value': 'blockquote'\n },\n {\n 'title': 'Code Block',\n 'tag': 'pre',\n 'className': 'prettyprint lang-java',\n 'value': 'pre'\n },\n 'h1', 'h2', 'h3', 'h4', 'h5', 'h6'\n ],\n\n 'airMode': False,\n 'toolbar': [\n ['style', ['style']],\n ['font', ['bold', 'underline', 'clear']],\n ['color', ['color']],\n ['para', ['ul', 'ol', 'paragraph']],\n ['table', ['table']],\n ['insert', ['link', 'picture', 'code']],\n ['view', ['fullscreen', 'codeview', 'help']],\n ],\n },\n\n 'codemirror': {\n 'mode': 'htmlmixed',\n 'lineNumbers': 'true',\n 'theme': 'monokai',\n },\n\n 'css': (\n '//cdnjs.cloudflare.com/ajax/libs/codemirror/5.29.0/theme/monokai.min.css',\n ),\n 'attachment_require_authentication': True,\n 'attachment_upload_to': 'uploads/summernote',\n 'attachment_model': 'bloggy.Media',\n 'attachment_absolute_uri': False\n\n}\nMESSAGE_STORAGE = \"django.contrib.messages.storage.cookie.CookieStorage\"\nSITE_TITLE = os.getenv(\"SITE_TITLE\", \"Bloggy\")\nSITE_TAGLINE = os.getenv(\"SITE_TAGLINE\", \"A perfectly crafted blog that developers love.\")\nSITE_DESCRIPTION = os.getenv(\"SITE_DESCRIPTION\")\nSITE_LOGO = os.getenv(\"SITE_LOGO\")\nASSETS_DOMAIN = os.getenv(\"ASSETS_DOMAIN\")\nGOOGLE_RECAPTHCA_SECRET_KEY = os.getenv('GOOGLE_RECAPTHCA_SECRET_KEY')\nGOOGLE_RECAPTHCA_TOKEN_VERIFY_URL = 'https://www.google.com/recaptcha/api/siteverify'\nREST_FRAMEWORK = {\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n ),\n 'DEFAULT_PAGINATION_CLASS': 'bloggy_api.pagination.CustomPaginatedResponse',\n 'PAGE_SIZE': 30,\n\n 'EXCEPTION_HANDLER': 'rest_framework.views.exception_handler',\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.SessionAuthentication'\n ]\n}\nCACHE_TTL = 60 * 15\nCACHE_MIDDLEWARE_ALIAS = 'default' # which cache alias to use\nCACHE_MIDDLEWARE_SECONDS = CACHE_TTL # number of seconds to cache a page for (TTL)\nCACHE_MIDDLEWARE_KEY_PREFIX = '' # should be used if the cache is shared across multiple sites that use the same\nENABLE_CACHING = os.getenv(\"ENABLE_CACHING\", \"False\") == \"True\"\n CACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',\n 'LOCATION': os.getenv(\"MEMCACHIER_SERVERS\", \"127.0.0.1:11211\"),\n \"OPTIONS\": {\n \"binary\": True,\n # 'username': os.getenv(\"MEMCACHIER_USERNAME\", \"\"),\n # 'password': os.getenv(\"MEMCACHIER_PASSWORD\", \"\"),\n \"behaviors\": {\n \"ketama\": True,\n },\n },\n }\n }\n CACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.dummy.DummyCache',\n }\n }\nHITCOUNT_KEEP_HIT_ACTIVE = {'days': 0}\nHITCOUNT_KEEP_HIT_IN_DATABASE = {'days': 77}\nHITCOUNT_HITS_PER_IP_LIMIT = 0\nSHORTCODES_YOUTUBE_JQUERY = False\nPING_INDEX_NOW_POST_UPDATE = os.getenv(\"PING_INDEX_NOW_POST_UPDATE\", \"True\")\nPING_GOOGLE_POST_UPDATE = os.getenv(\"PING_GOOGLE_POST_UPDATE\", \"True\")\nINDEX_NOW_API_KEY = os.getenv(\"INDEX_NOW_API_KEY\", )\nEMAIL_BACKEND = os.getenv('EMAIL_BACKEND')\nEMAIL_HOST = os.getenv('EMAIL_HOST')\nEMAIL_PORT = os.getenv('EMAIL_PORT')\nEMAIL_HOST_USER = os.getenv('EMAIL_HOST_USER')\nEMAIL_HOST_PASSWORD = os.getenv('EMAIL_HOST_PASSWORD')\nEMAIL_USE_TLS = os.getenv('EMAIL_USE_TLS', \"True\")\nDEFAULT_FROM_EMAIL = os.getenv('DEFAULT_FROM_EMAIL')\nEMAIL_FILE_PATH = os.getenv('EMAIL_FILE_PATH', os.path.join(BASE_DIR, 'test-emails'))\nPOST_TYPE_CHOICES = os.getenv('POST_TYPE_CHOICES')\nSHOW_EMTPY_CATEGORIES = os.getenv(\"SHOW_EMTPY_CATEGORIES\", \"False\") == \"True\"\nLOAD_GOOGLE_TAG_MANAGER = os.getenv(\"LOAD_GOOGLE_TAG_MANAGER\", \"False\") == \"True\"\nLOAD_GOOGLE_ADS = os.getenv(\"LOAD_GOOGLE_ADS\", \"False\") == \"True\"\nMY_ADS_TXT_CONTENT = os.getenv('MY_ADS_TXT_CONTENT')\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n },\n },\n \"root\": {\n \"handlers\": [\"console\"],\n \"level\": \"DEBUG\",\n },\n \"loggers\": {\n \"django\": {\n \"handlers\": [\"console\"],\n \"level\": os.getenv(\"DJANGO_LOG_LEVEL\", \"INFO\"),\n \"propagate\": False,\n },\n },\n}\ndef get_post_types():"
},
{
"identifier": "Category",
"path": "bloggy/models.py",
"snippet": ""
},
{
"identifier": "Course",
"path": "bloggy/models/course.py",
"snippet": "class Course(Content):\n difficulty = models.CharField(\n max_length=20, choices=[\n ('beginner', 'Beginner'),\n ('intermediate', 'Intermediate'),\n ('advance', 'advance'),\n ],\n default='easy', blank=True, null=True,\n help_text=\"Select difficulty\",\n verbose_name=\"Difficulty level\")\n\n is_featured = models.BooleanField(\n default=False,\n help_text=\"Should this story be featured on site?\"\n )\n\n description = models.TextField(null=True, help_text='Enter answer')\n author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='courses')\n thumbnail = models.ImageField(upload_to=upload_thumbnail_image, null=True, blank=True)\n category = models.ForeignKey(Category, blank=True, on_delete=models.CASCADE, related_name='courses')\n view_count = GenericRelation(HitCount, object_id_field='object_pk', related_query_name='hit_count_generic_relation')\n\n class Meta:\n ordering = ['-display_order']\n verbose_name = \"course\"\n verbose_name_plural = \"courses\"\n indexes = [\n models.Index(fields=['slug', 'publish_status', 'published_date']),\n ]\n\n def get_absolute_url(self):\n return reverse(\"courses_single\", kwargs={\"slug\": str(self.slug)})\n\n @property\n def get_lessons(self):\n return self.post_set.filter(publish_status=\"LIVE\").order_by(\"display_order\").all()\n\n def thumbnail_tag(self):\n if self.thumbnail:\n return format_html(f'<img src=\"{self.thumbnail.url}\" width=\"auto\" height=\"40\"/>')\n return \"\"\n\n thumbnail_tag.short_description = 'Logo'\n thumbnail_tag.allow_tags = True"
},
{
"identifier": "Content",
"path": "bloggy/models/mixin/Content.py",
"snippet": "class Content(Updatable, SeoAware):\n title = models.CharField(max_length=300, help_text='Enter title')\n slug = models.SlugField(max_length=150, help_text='Enter slug', unique=True)\n excerpt = models.CharField(\n max_length=500,\n help_text='Enter excerpt',\n null=True,\n blank=True\n )\n\n display_order = models.IntegerField(null=True, help_text='Display order', default=0)\n published_date = models.DateTimeField(null=True, blank=True)\n publish_status = models.CharField(\n max_length=20, choices=[\n ('DRAFT', 'DRAFT'),\n ('LIVE', 'LIVE'),\n ('DELETED', 'DELETED')\n ],\n default='DRAFT', blank=True, null=True,\n help_text=\"Select publish status\",\n verbose_name=\"Publish status\")\n\n def get_excerpt(self):\n return self.excerpt[:10]\n\n def save(self, *args, **kwargs):\n if StringUtils.is_blank(self.slug):\n self.slug = slugify(self.title)\n super().save(*args, **kwargs)\n\n def __str__(self):\n return str(self.title)\n\n class Meta:\n abstract = True"
},
{
"identifier": "StringUtils",
"path": "bloggy/utils/string_utils.py",
"snippet": "class StringUtils:\n @staticmethod\n def is_blank(text):\n return not (text and text.strip())\n\n @staticmethod\n def is_not_blank(text):\n return bool(text and text.strip())\n\n @staticmethod\n def to_json(text):\n tmp_json = serializers.serialize(\"json\", text)\n return json.dumps(json.loads(tmp_json))"
}
] | from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from django.db.models import TextField
from django.urls import reverse
from django.utils.html import format_html
from django.utils.text import slugify
from hitcount.models import HitCount
from bloggy import settings
from bloggy.models import Category, Bookmark
from bloggy.models.course import Course
from bloggy.models.mixin.Content import Content
from bloggy.utils.string_utils import StringUtils
import bloggy | 3,932 |
def upload_thumbnail_image(self, post_id):
return f'uploads/posts/{post_id}'
class Post(Content):
difficulty = models.CharField(
max_length=20, choices=[
('beginner', 'Beginner'),
('intermediate', 'Intermediate'),
('advance', 'advance'),
],
default='easy', blank=True, null=True,
help_text="Select difficulty",
verbose_name="Difficulty level")
is_featured = models.BooleanField(
default=False,
help_text="Should this story be featured on site?"
)
video_id = models.CharField(
max_length=100,
help_text='YouTube Video ID',
null=True,
blank=True
)
github_link = models.CharField(
max_length=200,
help_text='Github project link',
null=True,
blank=True
)
post_type = models.CharField(
max_length=20, choices=settings.get_post_types(),
default='article', blank=True, null=True,
help_text="Post type",
verbose_name="Post type")
template_type = models.CharField(
max_length=20, choices=[
('standard', 'Standard'),
('cover', 'Cover'),
('naked', 'Naked'),
('full', 'Full'),
],
default='standard', blank=True, null=True,
help_text="Template type",
verbose_name="Template type")
content = TextField(null=True, help_text='Post content')
thumbnail = models.ImageField(upload_to=upload_thumbnail_image, blank=True, null=True)
# This comes from Django HitCount
view_count = GenericRelation(
HitCount,
object_id_field='object_pk',
related_query_name='hit_count_generic_relation'
)
author = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='posts'
)
|
def upload_thumbnail_image(self, post_id):
return f'uploads/posts/{post_id}'
class Post(Content):
difficulty = models.CharField(
max_length=20, choices=[
('beginner', 'Beginner'),
('intermediate', 'Intermediate'),
('advance', 'advance'),
],
default='easy', blank=True, null=True,
help_text="Select difficulty",
verbose_name="Difficulty level")
is_featured = models.BooleanField(
default=False,
help_text="Should this story be featured on site?"
)
video_id = models.CharField(
max_length=100,
help_text='YouTube Video ID',
null=True,
blank=True
)
github_link = models.CharField(
max_length=200,
help_text='Github project link',
null=True,
blank=True
)
post_type = models.CharField(
max_length=20, choices=settings.get_post_types(),
default='article', blank=True, null=True,
help_text="Post type",
verbose_name="Post type")
template_type = models.CharField(
max_length=20, choices=[
('standard', 'Standard'),
('cover', 'Cover'),
('naked', 'Naked'),
('full', 'Full'),
],
default='standard', blank=True, null=True,
help_text="Template type",
verbose_name="Template type")
content = TextField(null=True, help_text='Post content')
thumbnail = models.ImageField(upload_to=upload_thumbnail_image, blank=True, null=True)
# This comes from Django HitCount
view_count = GenericRelation(
HitCount,
object_id_field='object_pk',
related_query_name='hit_count_generic_relation'
)
author = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='posts'
) | category = models.ManyToManyField(Category) | 1 | 2023-10-17 14:50:39+00:00 | 8k |
Iniquitatis/sd-webui-temporal | temporal/image_generation.py | [
{
"identifier": "clear_directory",
"path": "temporal/fs.py",
"snippet": "def clear_directory(path, pattern = None):\n if not path.is_dir():\n return path\n\n for entry in path.iterdir():\n if pattern and not entry.match(pattern):\n continue\n\n if entry.is_file():\n entry.unlink()\n elif entry.is_dir():\n clear_directory(entry)\n entry.rmdir()\n\n return path"
},
{
"identifier": "ensure_directory_exists",
"path": "temporal/fs.py",
"snippet": "def ensure_directory_exists(path):\n if not path.is_dir():\n path.mkdir(parents = True)\n\n return path"
},
{
"identifier": "remove_directory",
"path": "temporal/fs.py",
"snippet": "def remove_directory(path):\n if path.is_dir():\n rmtree(path)\n\n return path"
},
{
"identifier": "make_func_registerer",
"path": "temporal/func_utils.py",
"snippet": "def make_func_registerer(**default_params):\n registered = {}\n\n def wrapper(key, *args, **kwargs):\n def decorator(func):\n registered[key] = SimpleNamespace(func = func, **(\n deepcopy(default_params) |\n {k: v for k, v in zip(default_params.keys(), args)} |\n kwargs\n ))\n return func\n return decorator\n\n return registered, wrapper"
},
{
"identifier": "ImageBuffer",
"path": "temporal/image_buffer.py",
"snippet": "class ImageBuffer:\n def __init__(self, width, height, channels, count):\n self.array = np.zeros((count, height, width, channels))\n self.last_index = 0\n\n @property\n def width(self):\n return self.array.shape[2]\n\n @property\n def height(self):\n return self.array.shape[1]\n\n @property\n def channels(self):\n return self.array.shape[3]\n\n @property\n def count(self):\n return self.array.shape[0]\n\n def init(self, im):\n npim = self._convert_image_to_np(im)\n\n for i in range(self.count):\n self.array[i] = npim\n\n def add(self, im):\n self.array[self.last_index] = self._convert_image_to_np(im)\n\n self.last_index += 1\n self.last_index %= self.count\n\n def average(self, trimming = 0.0, easing = 0.0, preference = 0.0):\n return np_to_pil(self.array[0] if self.count == 1 else np.clip(average_array(\n self.array,\n axis = 0,\n trim = trimming,\n power = preference + 1.0,\n weights = np.roll(make_eased_weight_array(self.count, easing), self.last_index),\n ), 0.0, 1.0))\n\n def load(self, project_dir):\n buffer_dir = project_dir / \"session\" / \"buffer\"\n\n if data := load_json(buffer_dir / \"data.json\"):\n load_object(self, data, buffer_dir)\n\n def save(self, project_dir):\n buffer_dir = ensure_directory_exists(project_dir / \"session\" / \"buffer\")\n\n save_json(buffer_dir / \"data.json\", save_object(self, buffer_dir))\n\n def _convert_image_to_np(self, im):\n return pil_to_np(ensure_image_dims(\n im,\n \"RGBA\" if self.channels == 4 else \"RGB\",\n (self.width, self.height),\n ))"
},
{
"identifier": "PREPROCESSORS",
"path": "temporal/image_preprocessing.py",
"snippet": "def preprocess_image(im, ext_params, seed):\n def __init__(self, type, key, name, **kwargs):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\n def stretch_array(arr, new_length):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\ndef _apply_mask(npim, processed, amount, blend_mode, mask, normalized, inverted, blurring, reference):\nclass UIParam:"
},
{
"identifier": "average_images",
"path": "temporal/image_utils.py",
"snippet": "def average_images(ims, trimming = 0.0, easing = 0.0, preference = 0.0):\n return ims[0] if len(ims) == 1 else np_to_pil(np.clip(average_array(\n np.stack([pil_to_np(im) if isinstance(im, Image.Image) else im for im in ims]),\n axis = 0,\n trim = trimming,\n power = preference + 1.0,\n weights = np.flip(make_eased_weight_array(len(ims), easing)),\n ), 0.0, 1.0))"
},
{
"identifier": "ensure_image_dims",
"path": "temporal/image_utils.py",
"snippet": "def ensure_image_dims(im, mode, size):\n if is_np := isinstance(im, np.ndarray):\n im = Image.fromarray(skimage.util.img_as_ubyte(im))\n\n if im.mode != mode:\n im = im.convert(mode)\n\n if im.size != size:\n im = im.resize(size, Image.Resampling.LANCZOS)\n\n return skimage.util.img_as_float(im) if is_np else im"
},
{
"identifier": "generate_value_noise_image",
"path": "temporal/image_utils.py",
"snippet": "def generate_value_noise_image(size, channels, scale, octaves, lacunarity, persistence, seed = None):\n return np_to_pil(generate_value_noise((size[1], size[0], channels), scale, octaves, lacunarity, persistence, seed))"
},
{
"identifier": "save_image",
"path": "temporal/image_utils.py",
"snippet": "def save_image(im, path, archive_mode = False):\n tmp_path = path.with_suffix(\".tmp\")\n\n if path.is_file():\n path.unlink()\n\n if tmp_path.is_file():\n tmp_path.unlink()\n\n im.save(tmp_path, \"PNG\", **(dict(\n optimize = True,\n compress_level = 9,\n ) if archive_mode else {}))\n tmp_path.rename(path)"
},
{
"identifier": "Metrics",
"path": "temporal/metrics.py",
"snippet": "class Metrics:\n def __init__(self):\n self.luminance_mean = []\n self.luminance_std = []\n self.color_level_mean = []\n self.color_level_std = []\n self.noise_sigma = []\n\n def measure(self, im):\n npim = skimage.img_as_float(im)\n grayscale = skimage.color.rgb2gray(npim[..., :3], channel_axis = 2)\n red, green, blue = npim[..., 0], npim[..., 1], npim[..., 2]\n\n self.luminance_mean.append(np.mean(grayscale))\n self.luminance_std.append(np.std(grayscale))\n self.color_level_mean.append([np.mean(red), np.mean(green), np.mean(blue)])\n self.color_level_std.append([np.std(red), np.std(green), np.std(blue)])\n self.noise_sigma.append(skimage.restoration.estimate_sigma(npim, average_sigmas = True, channel_axis = 2))\n\n def load(self, project_dir):\n metrics_dir = project_dir / \"metrics\"\n\n if data := load_json(metrics_dir / \"data.json\"):\n load_object(self, data, metrics_dir)\n\n def save(self, project_dir):\n metrics_dir = ensure_directory_exists(project_dir / \"metrics\")\n\n save_json(metrics_dir / \"data.json\", save_object(self, metrics_dir))\n\n def plot(self, project_dir, save_images = False):\n metrics_dir = ensure_directory_exists(project_dir / \"metrics\")\n\n result = []\n\n @contextmanager\n def figure(title, path):\n plt.title(title)\n plt.xlabel(\"Frame\")\n plt.ylabel(\"Level\")\n plt.grid()\n\n try:\n yield\n finally:\n plt.legend()\n\n buffer = BytesIO()\n plt.savefig(buffer, format = \"png\")\n buffer.seek(0)\n\n im = Image.open(buffer)\n im.load()\n\n if save_images:\n save_image(im, path)\n\n result.append(im)\n\n plt.close()\n\n def plot_noise_graph(data, label, color):\n plt.axhline(data[0], color = color, linestyle = \":\", linewidth = 0.5)\n plt.axhline(np.mean(data), color = color, linestyle = \"--\", linewidth = 1.0)\n plt.plot(data, color = color, label = label, linestyle = \"--\", linewidth = 0.5, marker = \"+\", markersize = 3)\n\n if data.size > 3:\n plt.plot(scipy.signal.savgol_filter(data, min(data.size, 51), 3), color = color, label = f\"{label} (smoothed)\", linestyle = \"-\")\n\n with figure(\"Luminance mean\", metrics_dir / \"luminance_mean.png\"):\n plot_noise_graph(np.array(self.luminance_mean), \"Luminance\", \"gray\")\n\n with figure(\"Luminance standard deviation\", metrics_dir / \"luminance_std.png\"):\n plot_noise_graph(np.array(self.luminance_std), \"Luminance\", \"gray\")\n\n with figure(\"Color level mean\", metrics_dir / \"color_level_mean.png\"):\n plot_noise_graph(np.array(self.color_level_mean)[..., 0], \"Red\", \"darkred\")\n plot_noise_graph(np.array(self.color_level_mean)[..., 1], \"Green\", \"darkgreen\")\n plot_noise_graph(np.array(self.color_level_mean)[..., 2], \"Blue\", \"darkblue\")\n\n with figure(\"Color level standard deviation\", metrics_dir / \"color_level_std.png\"):\n plot_noise_graph(np.array(self.color_level_std)[..., 0], \"Red\", \"darkred\")\n plot_noise_graph(np.array(self.color_level_std)[..., 1], \"Green\", \"darkgreen\")\n plot_noise_graph(np.array(self.color_level_std)[..., 2], \"Blue\", \"darkblue\")\n\n with figure(\"Noise sigma\", metrics_dir / \"noise_sigma.png\"):\n plot_noise_graph(np.array(self.noise_sigma), \"Noise sigma\", \"royalblue\")\n\n return result"
},
{
"identifier": "copy_with_overrides",
"path": "temporal/object_utils.py",
"snippet": "def copy_with_overrides(obj, **overrides):\n instance = copy(obj)\n\n for key, value in overrides.items():\n if hasattr(instance, key):\n setattr(instance, key, value)\n else:\n print(f\"WARNING: Key {key} doesn't exist in {instance.__class__.__name__}\")\n\n return instance"
},
{
"identifier": "get_last_frame_index",
"path": "temporal/session.py",
"snippet": "def get_last_frame_index(frame_dir):\n def get_index(path):\n if path.is_file():\n try:\n return int(path.stem)\n except:\n print(f\"WARNING: {path} doesn't match the frame name format\")\n\n return 0\n\n return max((get_index(path) for path in frame_dir.glob(\"*.png\")), default = 0)"
},
{
"identifier": "load_last_frame",
"path": "temporal/session.py",
"snippet": "def load_last_frame(frame_dir):\n if index := get_last_frame_index(frame_dir):\n return load_image(frame_dir / f\"{index:05d}.png\")\n\n return None"
},
{
"identifier": "load_session",
"path": "temporal/session.py",
"snippet": "def load_session(p, ext_params, project_dir):\n if not (session_dir := (project_dir / \"session\")).is_dir():\n return\n\n if not (params_path := (session_dir / \"parameters.json\")).is_file():\n return\n\n upgrade_project(project_dir)\n\n data = load_json(params_path, {})\n\n # NOTE: `p.override_settings` juggles VAEs back-and-forth, slowing down the process considerably\n load_dict(opts.data, data.get(\"shared_params\", {}), session_dir)\n\n load_object(p, data.get(\"generation_params\", {}), session_dir)\n\n if external_code := import_cn():\n for unit_data, cn_unit in zip(data.get(\"controlnet_params\", []), external_code.get_all_units_in_processing(p)):\n load_object(cn_unit, unit_data, session_dir)\n\n load_object(ext_params, data.get(\"extension_params\", {}), session_dir)"
},
{
"identifier": "save_session",
"path": "temporal/session.py",
"snippet": "def save_session(p, ext_params, project_dir):\n session_dir = recreate_directory(project_dir / \"session\")\n\n save_json(session_dir / \"parameters.json\", dict(\n shared_params = save_dict(opts.data, session_dir, [\n \"sd_model_checkpoint\",\n \"sd_vae\",\n \"CLIP_stop_at_last_layers\",\n \"always_discard_next_to_last_sigma\",\n ]),\n generation_params = save_object(p, session_dir, [\n \"prompt\",\n \"negative_prompt\",\n \"init_images\",\n \"image_mask\",\n \"resize_mode\",\n \"mask_blur_x\",\n \"mask_blur_y\",\n \"inpainting_mask_invert\",\n \"inpainting_fill\",\n \"inpaint_full_res\",\n \"inpaint_full_res_padding\",\n \"sampler_name\",\n \"steps\",\n \"refiner_checkpoint\",\n \"refiner_switch_at\",\n \"width\",\n \"height\",\n \"cfg_scale\",\n \"denoising_strength\",\n \"seed\",\n \"seed_enable_extras\",\n \"subseed\",\n \"subseed_strength\",\n \"seed_resize_from_w\",\n \"seed_resize_from_h\",\n ]),\n controlnet_params = list(\n save_object(cn_unit, session_dir, [\n \"image\",\n \"enabled\",\n \"low_vram\",\n \"pixel_perfect\",\n \"module\",\n \"model\",\n \"weight\",\n \"guidance_start\",\n \"guidance_end\",\n \"processor_res\",\n \"threshold_a\",\n \"threshold_b\",\n \"control_mode\",\n \"resize_mode\",\n ])\n for cn_unit in external_code.get_all_units_in_processing(p)\n ) if (external_code := import_cn()) else [],\n extension_params = save_object(ext_params, session_dir, saved_ext_param_ids),\n ))\n save_text(session_dir / \"version.txt\", \"12\")"
},
{
"identifier": "ThreadQueue",
"path": "temporal/thread_queue.py",
"snippet": "class ThreadQueue:\n def __init__(self):\n self._queue = []\n self._execution_lock = Lock()\n self._queue_lock = Lock()\n\n @property\n def busy(self):\n with self._queue_lock:\n return len(self._queue) > 0\n\n def enqueue(self, target, *args, **kwargs):\n def callback():\n with self._execution_lock:\n target(*args, **kwargs)\n\n with self._queue_lock:\n self._queue.pop(0)\n\n with self._queue_lock:\n thread = Thread(target = callback)\n self._queue.append(thread)\n thread.start()"
},
{
"identifier": "wait_until",
"path": "temporal/time_utils.py",
"snippet": "def wait_until(func, interval = 1, time_limit = None):\n total_time = 0\n\n while (not func()) and (time_limit is None or total_time < time_limit):\n sleep(interval)\n\n total_time += interval"
}
] | from copy import copy, deepcopy
from itertools import count
from math import ceil
from pathlib import Path
from PIL import Image
from modules import images, processing
from modules.shared import opts, prompt_styles, state
from temporal.fs import clear_directory, ensure_directory_exists, remove_directory
from temporal.func_utils import make_func_registerer
from temporal.image_buffer import ImageBuffer
from temporal.image_preprocessing import PREPROCESSORS, preprocess_image
from temporal.image_utils import average_images, ensure_image_dims, generate_value_noise_image, save_image
from temporal.metrics import Metrics
from temporal.object_utils import copy_with_overrides
from temporal.session import get_last_frame_index, load_last_frame, load_session, save_session
from temporal.thread_queue import ThreadQueue
from temporal.time_utils import wait_until | 4,047 |
GENERATION_MODES, generation_mode = make_func_registerer(name = "")
image_save_queue = ThreadQueue()
@generation_mode("image", "Image")
def _(p, ext_params):
opts_backup = opts.data.copy()
_apply_prompt_styles(p)
if not _setup_processing(p, ext_params):
return processing.Processed(p, p.init_images)
image_buffer = _make_image_buffer(p, ext_params)
_apply_relative_params(ext_params, p.denoising_strength)
last_processed = processing.Processed(p, [p.init_images[0]])
for i in range(ext_params.frame_count):
if not (processed := _process_iteration(
p = p,
ext_params = ext_params,
image_buffer = image_buffer,
image = last_processed.images[0],
i = i,
frame_index = i + 1,
)):
break
last_processed = processed
_save_processed_image(
p = p,
processed = last_processed,
output_dir = ext_params.output_dir,
)
opts.data.update(opts_backup)
return last_processed
@generation_mode("sequence", "Sequence")
def _(p, ext_params):
opts_backup = opts.data.copy()
opts.save_to_dirs = False
project_dir = ensure_directory_exists(Path(ext_params.output_dir) / ext_params.project_subdir)
if not ext_params.continue_from_last_frame:
clear_directory(project_dir, "*.png")
remove_directory(project_dir / "session" / "buffer")
remove_directory(project_dir / "metrics")
_apply_prompt_styles(p)
if ext_params.load_parameters:
|
GENERATION_MODES, generation_mode = make_func_registerer(name = "")
image_save_queue = ThreadQueue()
@generation_mode("image", "Image")
def _(p, ext_params):
opts_backup = opts.data.copy()
_apply_prompt_styles(p)
if not _setup_processing(p, ext_params):
return processing.Processed(p, p.init_images)
image_buffer = _make_image_buffer(p, ext_params)
_apply_relative_params(ext_params, p.denoising_strength)
last_processed = processing.Processed(p, [p.init_images[0]])
for i in range(ext_params.frame_count):
if not (processed := _process_iteration(
p = p,
ext_params = ext_params,
image_buffer = image_buffer,
image = last_processed.images[0],
i = i,
frame_index = i + 1,
)):
break
last_processed = processed
_save_processed_image(
p = p,
processed = last_processed,
output_dir = ext_params.output_dir,
)
opts.data.update(opts_backup)
return last_processed
@generation_mode("sequence", "Sequence")
def _(p, ext_params):
opts_backup = opts.data.copy()
opts.save_to_dirs = False
project_dir = ensure_directory_exists(Path(ext_params.output_dir) / ext_params.project_subdir)
if not ext_params.continue_from_last_frame:
clear_directory(project_dir, "*.png")
remove_directory(project_dir / "session" / "buffer")
remove_directory(project_dir / "metrics")
_apply_prompt_styles(p)
if ext_params.load_parameters: | load_session(p, ext_params, project_dir) | 14 | 2023-10-15 18:49:12+00:00 | 8k |
zabbix/python-zabbix-utils | tests/test_zabbix_api.py | [
{
"identifier": "ZabbixAPI",
"path": "zabbix_utils/api.py",
"snippet": "class ZabbixAPI():\n \"\"\"Provide interface for working with Zabbix API.\n\n Args:\n url (str, optional): Zabbix API URL. Defaults to `http://localhost/zabbix/api_jsonrpc.php`.\n token (str, optional): Zabbix API token. Defaults to `None`.\n user (str, optional): Zabbix API username. Defaults to `None`.\n password (str, optional): Zabbix API user's password. Defaults to `None`.\n http_user (str, optional): Basic Authentication username. Defaults to `None`.\n http_password (str, optional): Basic Authentication password. Defaults to `None`.\n skip_version_check (bool, optional): Skip version compatibility check. Defaults to `False`.\n validate_certs (bool, optional): Specifying certificate validation. Defaults to `True`.\n timeout (int, optional): Connection timeout to Zabbix API. Defaults to `30`.\n \"\"\"\n\n __version = None\n __use_token = False\n __session_id = None\n __basic_cred = None\n\n def __init__(self, url: Union[str, None] = None, token: Union[str, None] = None,\n user: Union[str, None] = None, password: Union[str, None] = None,\n http_user: Union[str, None] = None, http_password: Union[str, None] = None,\n skip_version_check: bool = False, validate_certs: bool = True, timeout: int = 30):\n\n url = url or env.get('ZABBIX_URL') or 'http://localhost/zabbix/api_jsonrpc.php'\n user = user or env.get('ZABBIX_USER') or None\n password = password or env.get('ZABBIX_PASSWORD') or None\n token = token or env.get('ZABBIX_TOKEN') or None\n\n self.url = ModuleUtils.check_url(url)\n self.validate_certs = validate_certs\n self.timeout = timeout\n\n if http_user and http_password:\n self.__basic_auth(http_user, http_password)\n\n self.__check_version(skip_version_check)\n\n if token or user or password:\n self.login(token, user, password)\n\n def __getattr__(self, name: str) -> Callable:\n \"\"\"Dynamic creation of an API object.\n\n Args:\n name (str): Zabbix API method name.\n\n Returns:\n APIObject: Zabbix API object instance.\n \"\"\"\n\n return APIObject(name, self)\n\n def __enter__(self) -> Self:\n return self\n\n def __exit__(self, *args) -> None:\n self.logout()\n\n def __basic_auth(self, user: str, password: str) -> Self:\n \"\"\"Enable Basic Authentication using.\n\n Args:\n user (str): Basic Authentication username.\n password (str): Basic Authentication password.\n \"\"\"\n\n log.debug(\n \"Enable Basic Authentication with username:%s password:%s\",\n user,\n ModuleUtils.HIDING_MASK\n )\n\n self.__basic_cred = base64.b64encode(\n f\"{user}:{password}\".encode()\n ).decode()\n\n def api_version(self) -> APIVersion:\n \"\"\"Return object of Zabbix API version.\n\n Returns:\n APIVersion: Object of Zabbix API version\n \"\"\"\n\n if self.__version is None:\n self.__version = APIVersion(self.apiinfo.version())\n return self.__version\n\n @property\n def version(self) -> APIVersion:\n \"\"\"Return object of Zabbix API version.\n\n Returns:\n APIVersion: Object of Zabbix API version.\n \"\"\"\n\n return self.api_version()\n\n def login(self, token: Union[str, None] = None, user: Union[str, None] = None,\n password: Union[str, None] = None) -> Self:\n \"\"\"Login to Zabbix API.\n\n Args:\n token (str, optional): Zabbix API token. Defaults to `None`.\n user (str, optional): Zabbix API username. Defaults to `None`.\n password (str, optional): Zabbix API user's password. Defaults to `None`.\n \"\"\"\n\n if token:\n if self.version < 5.4:\n raise APINotSupported(\n message=\"Token usage\",\n version=self.version\n )\n if user or password:\n raise ProcessingError(\n \"Token cannot be used with username and password\")\n self.__use_token = True\n self.__session_id = token\n return\n\n if not user:\n raise ProcessingError(\"Username is missing\")\n if not password:\n raise ProcessingError(\"User password is missing\")\n\n if self.version < 5.4:\n user_cred = {\n \"user\": user,\n \"password\": password\n }\n else:\n user_cred = {\n \"username\": user,\n \"password\": password\n }\n\n log.debug(\n \"Login to Zabbix API using username:%s password:%s\", user, ModuleUtils.HIDING_MASK\n )\n self.__use_token = False\n self.__session_id = self.user.login(**user_cred)\n\n log.debug(\"Connected to Zabbix API version %s: %s\", self.version, self.url)\n\n def logout(self) -> None:\n \"\"\"Logout from Zabbix API.\"\"\"\n\n if self.__session_id:\n if self.__use_token:\n self.__session_id = None\n self.__use_token = False\n return\n\n log.debug(\"Logout from Zabbix API\")\n self.user.logout()\n self.__session_id = None\n else:\n log.debug(\"You're not logged in Zabbix API\")\n\n def check_auth(self) -> bool:\n \"\"\"Check authentication status in Zabbix API.\n\n Returns:\n bool: User authentication status (`True`, `False`)\n \"\"\"\n\n if not self.__session_id:\n log.debug(\"You're not logged in Zabbix API\")\n return False\n\n if self.__use_token:\n log.debug(\"Check auth session using token in Zabbix API\")\n refresh_resp = self.user.checkAuthentication(token=self.__session_id)\n else:\n log.debug(\"Check auth session using sessionid in Zabbix API\")\n refresh_resp = self.user.checkAuthentication(sessionid=self.__session_id)\n\n return bool(refresh_resp.get('userid'))\n\n def send_api_request(self, method: str, params: Union[dict, None] = None,\n need_auth=True) -> dict:\n \"\"\"Function for sending request to Zabbix API.\n\n Args:\n method (str): Zabbix API method name.\n params (dict, optional): Params for request body. Defaults to `None`.\n need_auth (bool, optional): Authorization using flag. Defaults to `False`.\n\n Raises:\n ProcessingError: Wrapping built-in exceptions during request processing.\n APIRequestError: Wrapping errors from Zabbix API.\n\n Returns:\n dict: Dictionary with Zabbix API response.\n \"\"\"\n\n request_json = {\n 'jsonrpc': '2.0',\n 'method': method,\n 'params': params or {},\n 'id': str(uuid4()),\n }\n\n headers = {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json-rpc',\n 'User-Agent': f\"{__name__}/{__version__}\"\n }\n\n if need_auth:\n if not self.__session_id:\n raise ProcessingError(\"You're not logged in Zabbix API\")\n if self.version < 6.4 or self.__basic_cred is not None:\n request_json['auth'] = self.__session_id\n else:\n headers[\"Authorization\"] = f\"Bearer {self.__session_id}\"\n\n if self.__basic_cred is not None:\n headers[\"Authorization\"] = f\"Basic {self.__basic_cred}\"\n\n log.debug(\n \"Sending request to %s with body: %s\",\n self.url,\n request_json\n )\n\n req = ul.Request(\n self.url,\n data=json.dumps(request_json).encode(\"utf-8\"),\n headers=headers,\n method='POST'\n )\n req.timeout = self.timeout\n\n # Disable SSL certificate validation if needed.\n if not self.validate_certs:\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n else:\n ctx = None\n\n try:\n resp = ul.urlopen(req, context=ctx)\n resp_json = json.loads(resp.read().decode('utf-8'))\n except URLError as err:\n raise ProcessingError(f\"Unable to connect to {self.url}:\", err) from None\n except ValueError as err:\n raise ProcessingError(\"Unable to parse json:\", err) from None\n\n if method not in ModuleUtils.FILES_METHODS:\n log.debug(\n \"Received response body: %s\",\n resp_json\n )\n else:\n debug_json = resp_json.copy()\n if debug_json.get('result'):\n debug_json['result'] = shorten(debug_json['result'], 200, placeholder='...')\n log.debug(\n \"Received response body (clipped): %s\",\n json.dumps(debug_json, indent=4, separators=(',', ': '))\n )\n\n if 'error' in resp_json:\n err = resp_json['error'].copy()\n err['body'] = request_json.copy()\n raise APIRequestError(err)\n\n return resp_json\n\n def __check_version(self, skip_check: bool) -> None:\n\n skip_check_help = \"If you're sure zabbix_utils will work properly with your current \\\nZabbix version you can skip this check by \\\nspecifying skip_version_check=True when create ZabbixAPI object.\"\n if self.version < __min_supported__:\n if skip_check:\n log.debug(\n \"Version of Zabbix API [%s] is less than the library supports. %s\",\n self.version,\n \"Further library use at your own risk!\"\n )\n else:\n raise APINotSupported(\n f\"Version of Zabbix API [{self.version}] is not supported by the library. \" +\n f\"The oldest supported version is {__min_supported__}.0. \" + skip_check_help\n )\n\n if self.version > __max_supported__:\n if skip_check:\n log.debug(\n \"Version of Zabbix API [%s] is more than the library was tested on. %s\",\n self.version,\n \"Recommended to update the library. Further library use at your own risk!\"\n )\n else:\n raise APINotSupported(\n f\"Version of Zabbix API [{self.version}] was not tested with the library. \" +\n f\"The latest tested version is {__max_supported__}.0. \" + skip_check_help\n )"
},
{
"identifier": "APIVersion",
"path": "zabbix_utils/api.py",
"snippet": "class APIVersion():\n \"\"\"Zabbix API version object.\n\n Args:\n apiver (str): Raw version in string format.\n \"\"\"\n\n def __init__(self, apiver: str):\n self.__raw = apiver\n self.__first, self.__second, self.__third = self.__parse_version(self.__raw)\n\n def __getitem__(self, index: int) -> Any:\n # Get a symbol from the raw version string by index\n # For compatibility with using Zabbix version as a string\n return self.__raw[index]\n\n def is_lts(self) -> bool:\n \"\"\"Check if the current version is LTS.\n\n Returns:\n bool: `True` if the current version is LTS.\n \"\"\"\n\n return self.__second == 0\n\n @property\n def major(self) -> float:\n \"\"\"Get major version number.\n\n Returns:\n float: A major version number.\n \"\"\"\n\n return float(f\"{self.__first}.{self.__second}\")\n\n @property\n def minor(self) -> int:\n \"\"\"Get minor version number.\n\n Returns:\n int: A minor version number.\n \"\"\"\n\n return self.__third\n\n def __parse_version(self, ver: str) -> List[Any]:\n # Parse the version string into a list of integers.\n match = re.fullmatch(r'(\\d+)\\.(\\d+)\\.(\\d+)', ver)\n if match is None:\n raise ValueError(\n f\"Unable to parse version of Zabbix API: {ver}. \" +\n f\"Default '{__max_supported__}.0' format is expected.\"\n ) from None\n return list(map(int, match.groups()))\n\n def __str__(self) -> str:\n return self.__raw\n\n def __repr__(self) -> str:\n return self.__raw\n\n def __eq__(self, other: Union[float, str]) -> bool:\n if isinstance(other, float):\n return self.major == other\n if isinstance(other, str):\n return [self.__first, self.__second, self.__third] == self.__parse_version(other)\n raise TypeError(\n f\"'==' not supported between instances of '{type(self).__name__}' and \\\n'{type(other).__name__}', only 'float' or 'str' is expected\"\n )\n\n def __gt__(self, other: Union[float, str]) -> bool:\n if isinstance(other, float):\n return self.major > other\n if isinstance(other, str):\n return [self.__first, self.__second, self.__third] > self.__parse_version(other)\n raise TypeError(\n f\"'>' not supported between instances of '{type(self).__name__}' and \\\n'{type(other).__name__}', only 'float' or 'str' is expected\"\n )\n\n def __lt__(self, other: Union[float, str]) -> bool:\n if isinstance(other, float):\n return self.major < other\n if isinstance(other, str):\n return [self.__first, self.__second, self.__third] < self.__parse_version(other)\n raise TypeError(\n f\"'<' not supported between instances of '{type(self).__name__}' and \\\n'{type(other).__name__}', only 'float' or 'str' is expected\"\n )\n\n def __ne__(self, other: Any) -> bool:\n return not self.__eq__(other)\n\n def __ge__(self, other: Any) -> bool:\n return not self.__lt__(other)\n\n def __le__(self, other: Any) -> bool:\n return not self.__gt__(other)"
},
{
"identifier": "ModuleUtils",
"path": "zabbix_utils/common.py",
"snippet": "class ModuleUtils():\n\n # Hidding mask for sensitive data\n HIDING_MASK = \"*\" * 8\n\n # The main php-file of Zabbix API\n JSONRPC_FILE = 'api_jsonrpc.php'\n\n # Methods working without auth token\n UNAUTH_METHODS = ('apiinfo.version', 'user.login', 'user.checkAuthentication')\n\n # Methods returning files contents\n FILES_METHODS = ('configuration.export',)\n\n # List of private fields and regular expressions to hide them\n PRIVATE_FIELDS = {\n \"token\": r\"^.+$\",\n \"auth\": r\"^.+$\",\n \"sessionid\": r\"^.+$\",\n \"password\": r\"^.+$\",\n \"result\": r\"^[A-Za-z0-9]{32}$\",\n }\n\n @classmethod\n def check_url(cls, url: str) -> str:\n \"\"\"Check url completeness\n\n Args:\n url (str): Unchecked URL of Zabbix API\n\n Returns:\n str: Checked URL of Zabbix API\n \"\"\"\n\n if not url.endswith(cls.JSONRPC_FILE):\n url += cls.JSONRPC_FILE if url[-1] == '/' else '/' + cls.JSONRPC_FILE\n if not url.startswith('http'):\n url = 'http://' + url\n\n return url\n\n @classmethod\n def mask_secret(cls, string: str, show_len: int = 4) -> str:\n \"\"\"Replace the most part of string to hiding mask.\n\n Args:\n string (str): Raw string with without hiding.\n show_len (int, optional): Number of signs shown on each side of the string. \\\nDefaults to 4.\n\n Returns:\n str: String with hiding part.\n \"\"\"\n\n # If show_len is 0 or the length of the string is smaller than the hiding mask length\n # and show_len from both sides of the string, return only hiding mask.\n if show_len == 0 or len(string) <= (len(cls.HIDING_MASK) + show_len*2):\n return cls.HIDING_MASK\n\n # Return the string with the hiding mask, surrounded by the specified number of characters\n # to display on each side of the string.\n return f\"{string[:show_len]}{cls.HIDING_MASK}{string[-show_len:]}\"\n\n @classmethod\n def hide_private(cls, input_data: dict, fields: dict = None) -> dict:\n \"\"\"Hide private data Zabbix info (e.g. token, password)\n\n Args:\n input_data (dict): Input dictionary with private fields.\n fields (dict): Dictionary of private fields and their filtering regexps.\n\n Returns:\n dict: Result dictionary without private data.\n \"\"\"\n\n private_fields = fields if fields else cls.PRIVATE_FIELDS\n\n if not isinstance(input_data, dict):\n raise TypeError(f\"Unsupported data type '{type(input_data).__name__}', \\\nonly 'dict' is expected\")\n\n def gen_repl(match: Match):\n return cls.mask_secret(match.group(0))\n\n def hide_str(k, v):\n return re.sub(private_fields[k], gen_repl, v)\n\n def hide_dict(v):\n return cls.hide_private(v)\n\n def hide_list(v):\n result = []\n for item in v:\n if isinstance(item, dict):\n result.append(hide_dict(item))\n continue\n if isinstance(item, list):\n result.append(hide_list(item))\n continue\n if isinstance(item, str):\n if 'result' in private_fields:\n result.append(hide_str('result', item))\n continue\n result.append(item)\n return result\n\n result_data = input_data.copy()\n\n for key, value in result_data.items():\n if isinstance(value, str):\n if key in private_fields:\n result_data[key] = hide_str(key, value)\n if isinstance(value, dict):\n result_data[key] = hide_dict(value)\n if isinstance(value, list):\n result_data[key] = hide_list(value)\n\n return result_data"
},
{
"identifier": "__min_supported__",
"path": "zabbix_utils/version.py",
"snippet": ""
},
{
"identifier": "APINotSupported",
"path": "zabbix_utils/exceptions.py",
"snippet": "class APINotSupported(ModuleBaseException):\n \"\"\"Exception class when object/action is not supported by Zabbix API.\n\n Args:\n message (str): Not supported object/action message.\n\n version (str): Current version of Zabbix API.\n \"\"\"\n\n def __init__(self, message: str, version: str = None):\n if version:\n message = f\"{message} is unsupported for Zabbix {version} version\"\n super().__init__(message)"
},
{
"identifier": "ProcessingError",
"path": "zabbix_utils/exceptions.py",
"snippet": "class ProcessingError(ModuleBaseException):\n def __init__(self, *args):\n super().__init__(\" \".join(map(str, args)))\n return"
}
] | import json
import unittest
from unittest.mock import patch
from zabbix_utils.api import ZabbixAPI, APIVersion
from zabbix_utils.common import ModuleUtils
from zabbix_utils.version import __min_supported__, __max_supported__
from zabbix_utils.exceptions import APINotSupported, ProcessingError | 5,048 | # zabbix_utils
#
# Copyright (C) 2001-2023 Zabbix SIA
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
DEFAULT_VALUES = {
'user': 'Admin',
'password': 'zabbix',
'token': 'oTmtWu',
'session': 'cc364fb50199c5e305aa91785b7e49a0',
| # zabbix_utils
#
# Copyright (C) 2001-2023 Zabbix SIA
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
DEFAULT_VALUES = {
'user': 'Admin',
'password': 'zabbix',
'token': 'oTmtWu',
'session': 'cc364fb50199c5e305aa91785b7e49a0', | 'max_version': "{}.0".format(__max_supported__ + .2), | 3 | 2023-10-16 12:49:35+00:00 | 8k |
miccunifi/TAPE | models/swin_transformer_3d.py | [
{
"identifier": "compute_mask_3D",
"path": "utils/utils_models.py",
"snippet": "def compute_mask_3D(D: int, H: int, W: int, window_size: Tuple[int], shift_size: Tuple[int], device: torch.device)\\\n -> torch.Tensor:\n \"\"\"\n Compute 3D mask for window-based multi-head self-attention\n \"\"\"\n img_mask = torch.zeros((1, D, H, W, 1), device=device) # 1 Dp Hp Wp 1\n cnt = 0\n for d in slice(-window_size[0]), slice(-window_size[0], -shift_size[0]), slice(-shift_size[0], None):\n for h in slice(-window_size[1]), slice(-window_size[1], -shift_size[1]), slice(-shift_size[1], None):\n for w in slice(-window_size[2]), slice(-window_size[2], -shift_size[2]), slice(-shift_size[2], None):\n img_mask[:, d, h, w, :] = cnt\n cnt += 1\n mask_windows = window_partition_3D(img_mask, window_size) # nW, ws[0]*ws[1]*ws[2], 1\n mask_windows = mask_windows.squeeze(-1) # nW, ws[0]*ws[1]*ws[2]\n attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)\n attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))\n return attn_mask"
},
{
"identifier": "window_partition_3D",
"path": "utils/utils_models.py",
"snippet": "def window_partition_3D(x: torch.Tensor, window_size: Tuple[int]) -> torch.Tensor:\n \"\"\" Partition the input into windows. Attention will be conducted within the windows.\n From https://github.com/JingyunLiang/VRT/blob/main/models/network_vrt.py\n\n Args:\n x (torch.Tensor): (B, D, H, W, C)\n window_size (tuple[int]): window size\n Returns:\n windows (torch.Tensor): (B*num_windows, window_size*window_size, C)\n \"\"\"\n B, D, H, W, C = x.shape\n x = x.view(B, D // window_size[0], window_size[0], H // window_size[1], window_size[1], W // window_size[2],\n window_size[2], C)\n windows = x.permute(0, 1, 3, 5, 2, 4, 6, 7).contiguous().view(-1, reduce(mul, window_size), C)\n\n return windows"
},
{
"identifier": "window_reverse_3D",
"path": "utils/utils_models.py",
"snippet": "def window_reverse_3D(windows: torch.Tensor, window_size: Tuple[int], B: int, D: int, H: int, W: int) -> torch.Tensor:\n \"\"\" Reverse windows back to the original input. Attention was conducted within the windows.\n From https://github.com/JingyunLiang/VRT/blob/main/models/network_vrt.py\n Args:\n windows (torch.Tensor): (B*num_windows, window_size, window_size, window_size, C)\n window_size (tuple[int]): Window size\n H (int): Height of image\n W (int): Width of image\n Returns:\n x (torch.Tensor): (B, D, H, W, C)\n \"\"\"\n x = windows.view(B, D // window_size[0], H // window_size[1], W // window_size[2], window_size[0], window_size[1],\n window_size[2], -1)\n x = x.permute(0, 1, 4, 2, 5, 3, 6, 7).contiguous().view(B, D, H, W, -1)\n\n return x"
},
{
"identifier": "get_window_size",
"path": "utils/utils_models.py",
"snippet": "def get_window_size(x_size: Tuple[int], window_size: Tuple[int], shift_size: Tuple[int] = None)\\\n -> Tuple[int] | Tuple[Tuple[int]]:\n use_window_size = list(window_size)\n if shift_size is not None:\n use_shift_size = list(shift_size)\n for i in range(len(x_size)):\n if x_size[i] <= window_size[i]:\n use_window_size[i] = x_size[i]\n if shift_size is not None:\n use_shift_size[i] = 0\n\n if shift_size is None:\n return tuple(use_window_size)\n else:\n return tuple(use_window_size), tuple(use_shift_size)"
},
{
"identifier": "DropPath",
"path": "utils/utils_models.py",
"snippet": "class DropPath(nn.Module):\n \"\"\"Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\n \"\"\"\n def __init__(self, drop_prob=None, scale_by_keep=True):\n super(DropPath, self).__init__()\n self.drop_prob = drop_prob\n self.scale_by_keep = scale_by_keep\n\n def forward(self, x):\n return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)"
},
{
"identifier": "Mlp",
"path": "utils/utils_models.py",
"snippet": "class Mlp(nn.Module):\n \"\"\" Multilayer perceptron.\"\"\"\n\n def __init__(self, in_features: int, hidden_features: int = None, out_features: int = None,\n act_layer: nn.Module = nn.GELU, drop: float = 0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x"
},
{
"identifier": "trunc_normal_",
"path": "utils/utils_models.py",
"snippet": "def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):\n r\"\"\"Fills the input Tensor with values drawn from a truncated\n normal distribution.\n From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py\n The values are effectively drawn from the\n normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`\n with values outside :math:`[a, b]` redrawn until they are within\n the bounds. The method used for generating the random values works\n best when :math:`a \\leq \\text{mean} \\leq b`.\n Args:\n tensor: an n-dimensional `torch.Tensor`\n mean: the mean of the normal distribution\n std: the standard deviation of the normal distribution\n a: the minimum cutoff value\n b: the maximum cutoff value\n Examples:\n w = torch.empty(3, 5)\n nn.init.trunc_normal_(w)\n \"\"\"\n return _no_grad_trunc_normal_(tensor, mean, std, a, b)"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
import numpy as np
from typing import Tuple
from einops import rearrange
from utils.utils_models import (compute_mask_3D, window_partition_3D, window_reverse_3D, get_window_size, DropPath, Mlp,
trunc_normal_) | 4,391 | # define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1) * (2 * window_size[2] - 1),
num_heads)) # 2*Wd-1 * 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_d = torch.arange(self.window_size[0])
coords_h = torch.arange(self.window_size[1])
coords_w = torch.arange(self.window_size[2])
coords = torch.stack(torch.meshgrid(coords_d, coords_h, coords_w)) # 3, Wd, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 3, Wd*Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 3, Wd*Wh*Ww, Wd*Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wd*Wh*Ww, Wd*Wh*Ww, 3
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 2] += self.window_size[2] - 1
relative_coords[:, :, 0] *= (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1)
relative_coords[:, :, 1] *= (2 * self.window_size[2] - 1)
relative_position_index = relative_coords.sum(-1) # Wd*Wh*Ww, Wd*Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x: torch.Tensor, mask: torch.Tensor = None) -> torch.Tensor:
""" Forward function.
Args:
x (torch.Tensor): input features with shape of (num_windows*B, N, C)
mask (torch.Tensor): (0/-inf) mask with shape of (num_windows, N, N) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # B_, nH, N, C
q = q * self.scale
attn = q @ k.transpose(-2, -1)
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index[:N, :N].reshape(-1)].reshape(
N, N, -1) # Wd*Wh*Ww,Wd*Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wd*Wh*Ww, Wd*Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0) # B_, nH, N, N
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class SwinTransformerBlock3D(nn.Module):
""" Swin Transformer Block.
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
window_size (tuple[int]): Window size.
shift_size (tuple[int]): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
use_checkpoint (bool): Whether to use gradient checkpointing to save memory. Default: False.
"""
def __init__(self, dim: int,
num_heads: int,
window_size: Tuple[int] = (2, 7, 7),
shift_size: Tuple[int] = (0, 0, 0),
mlp_ratio: float = 4.,
qkv_bias: bool = True,
qk_scale: float = None,
drop: float = 0.,
attn_drop: float = 0.,
drop_path: float = 0.,
act_layer: nn.Module = nn.GELU,
norm_layer: nn.Module = nn.LayerNorm,
use_checkpoint: bool = False):
super().__init__()
self.dim = dim
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
self.use_checkpoint = use_checkpoint
assert 0 <= self.shift_size[0] < self.window_size[0], "shift_size must in 0-window_size"
assert 0 <= self.shift_size[1] < self.window_size[1], "shift_size must in 0-window_size"
assert 0 <= self.shift_size[2] < self.window_size[2], "shift_size must in 0-window_size"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention3D(
dim, window_size=self.window_size, num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward_part1(self, x: torch.Tensor, mask_matrix: torch.Tensor) -> torch.Tensor:
B, D, H, W, C = x.shape
|
class PatchMerging(nn.Module):
"""
Patch Merging Layer
Args:
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim: int, norm_layer: nn.Module = nn.LayerNorm):
super().__init__()
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Forward function
Args:
x: Input feature, tensor size (B, D, H, W, C).
"""
B, D, H, W, C = x.shape
# padding
pad_input = (H % 2 == 1) or (W % 2 == 1)
if pad_input:
x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))
x0 = x[:, :, 0::2, 0::2, :] # B D H/2 W/2 C
x1 = x[:, :, 1::2, 0::2, :] # B D H/2 W/2 C
x2 = x[:, :, 0::2, 1::2, :] # B D H/2 W/2 C
x3 = x[:, :, 1::2, 1::2, :] # B D H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B D H/2 W/2 4*C
x = self.norm(x)
x = self.reduction(x)
return x
class PatchExpand(nn.Module):
"""
Patch Expand Layer
Args:
embed_dim (int): Embedding dimension.
"""
def __init__(self, embed_dim: int):
super().__init__()
self.before_conv = nn.Conv2d(embed_dim, embed_dim * 2, 3, 1, 1)
self.pixel_shuffle = nn.PixelShuffle(upscale_factor=2)
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
self.after_conv = nn.Conv2d(embed_dim // 2, embed_dim // 2, 3, 1, 1)
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, C, T, H, W = x.shape
x = rearrange(x, 'b c t h w -> b t c h w').reshape(B * T, C, H, W)
x = self.before_conv(x)
x = self.pixel_shuffle(x)
x = self.after_conv(self.lrelu(x))
_, C, H, W = x.shape
x = rearrange(x.reshape(B, T, C, H, W), 'b t c h w -> b c t h w')
return x
class WindowAttention3D(nn.Module):
"""
Window based 3D multi-head self attention (W-MSA) module with relative position bias.
It supports both shifted and non-shifted window.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The temporal length, height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self,
dim: int,
window_size: Tuple[int],
num_heads: int,
qkv_bias: bool = False,
qk_scale: float = None,
attn_drop: float = 0.,
proj_drop: float = 0.):
super().__init__()
self.dim = dim
self.window_size = window_size # Wd, Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1) * (2 * window_size[2] - 1),
num_heads)) # 2*Wd-1 * 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_d = torch.arange(self.window_size[0])
coords_h = torch.arange(self.window_size[1])
coords_w = torch.arange(self.window_size[2])
coords = torch.stack(torch.meshgrid(coords_d, coords_h, coords_w)) # 3, Wd, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 3, Wd*Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 3, Wd*Wh*Ww, Wd*Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wd*Wh*Ww, Wd*Wh*Ww, 3
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 2] += self.window_size[2] - 1
relative_coords[:, :, 0] *= (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1)
relative_coords[:, :, 1] *= (2 * self.window_size[2] - 1)
relative_position_index = relative_coords.sum(-1) # Wd*Wh*Ww, Wd*Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x: torch.Tensor, mask: torch.Tensor = None) -> torch.Tensor:
""" Forward function.
Args:
x (torch.Tensor): input features with shape of (num_windows*B, N, C)
mask (torch.Tensor): (0/-inf) mask with shape of (num_windows, N, N) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # B_, nH, N, C
q = q * self.scale
attn = q @ k.transpose(-2, -1)
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index[:N, :N].reshape(-1)].reshape(
N, N, -1) # Wd*Wh*Ww,Wd*Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wd*Wh*Ww, Wd*Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0) # B_, nH, N, N
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class SwinTransformerBlock3D(nn.Module):
""" Swin Transformer Block.
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
window_size (tuple[int]): Window size.
shift_size (tuple[int]): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
use_checkpoint (bool): Whether to use gradient checkpointing to save memory. Default: False.
"""
def __init__(self, dim: int,
num_heads: int,
window_size: Tuple[int] = (2, 7, 7),
shift_size: Tuple[int] = (0, 0, 0),
mlp_ratio: float = 4.,
qkv_bias: bool = True,
qk_scale: float = None,
drop: float = 0.,
attn_drop: float = 0.,
drop_path: float = 0.,
act_layer: nn.Module = nn.GELU,
norm_layer: nn.Module = nn.LayerNorm,
use_checkpoint: bool = False):
super().__init__()
self.dim = dim
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
self.use_checkpoint = use_checkpoint
assert 0 <= self.shift_size[0] < self.window_size[0], "shift_size must in 0-window_size"
assert 0 <= self.shift_size[1] < self.window_size[1], "shift_size must in 0-window_size"
assert 0 <= self.shift_size[2] < self.window_size[2], "shift_size must in 0-window_size"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention3D(
dim, window_size=self.window_size, num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward_part1(self, x: torch.Tensor, mask_matrix: torch.Tensor) -> torch.Tensor:
B, D, H, W, C = x.shape | window_size, shift_size = get_window_size((D, H, W), self.window_size, self.shift_size) | 3 | 2023-10-19 09:14:40+00:00 | 8k |
OPTML-Group/Diffusion-MU-Attack | src/tasks/classifier_.py | [
{
"identifier": "calculate_clip_score",
"path": "src/tasks/utils/metrics/clip_score.py",
"snippet": "def calculate_clip_score(images, prompts,device):\n clip_score = clip_score_fn(torch.from_numpy(images).to(device), prompts).detach()\n return round(float(clip_score), 4)"
},
{
"identifier": "if_nude",
"path": "src/tasks/utils/metrics/nudity_eval.py",
"snippet": "def if_nude(res, threshold=0.75):\n return any([ (key in res.keys()) and (res[key] > threshold) for key in NUDE_KEYS ])"
},
{
"identifier": "detectNudeClasses",
"path": "src/tasks/utils/metrics/nudity_eval.py",
"snippet": "def detectNudeClasses(img_paths, threshold=0.45):\n detector = NudeDetector()\n results = []\n for img in tqdm(img_paths, disable=len(img_paths) < 100):\n detected = detector.detect(img)\n res = {}\n for detect in detected:\n if detect['score'] > threshold and detect['class'] in NUDE_KEYS:\n res[detect['class']] = detect['score']\n results.append(res)\n return results"
},
{
"identifier": "q16_binary_classifier",
"path": "src/tasks/utils/metrics/harm_eval.py",
"snippet": "def q16_binary_classifier(device):\n prompt_path = 'src/tasks/utils/metrics/q16/prompts.p'\n trained_prompts = load_prompts(prompt_path, device=device)\n clip_model = ClipWrapper(device)\n classifier = SimClassifier(trained_prompts, device)\n classifier.to(device)\n return clip_model, classifier"
},
{
"identifier": "harm_eval",
"path": "src/tasks/utils/metrics/harm_eval.py",
"snippet": "def harm_eval(clip_model, classifier, img_path, device):\n x = compute_embeddings(clip_model, img_path, device)\n y = classifier(x)\n label = torch.argmax(y, dim=0) # 0 for benign, 1 for malicious\n return label.item(), torch.softmax(y, dim=-1).squeeze()"
},
{
"identifier": "style_eval",
"path": "src/tasks/utils/metrics/style_eval.py",
"snippet": "def style_eval(classifier,img):\n return classifier(img,top_k=129)"
},
{
"identifier": "init_classifier",
"path": "src/tasks/utils/metrics/style_eval.py",
"snippet": "def init_classifier(device,path):\n return pipeline('image-classification',model=path,device=device)"
},
{
"identifier": "imagenet_ResNet50",
"path": "src/tasks/utils/metrics/object_eval.py",
"snippet": "def imagenet_ResNet50(device):\n processor = AutoImageProcessor.from_pretrained(\"microsoft/resnet-50\", cache_dir=\".cache\")\n model = ResNetForImageClassification.from_pretrained(\"microsoft/resnet-50\", cache_dir=\".cache\")\n model.to(device)\n return processor, model"
},
{
"identifier": "object_eval",
"path": "src/tasks/utils/metrics/object_eval.py",
"snippet": "def object_eval(classifier, img, processor, device):\n with torch.no_grad():\n inputs = processor(img, return_tensors=\"pt\")\n inputs.to(device)\n logits = classifier(**inputs).logits\n\n # model predicts one of the 1000 ImageNet classes\n predicted_label = logits.argmax(-1).item()\n # print(predicted_label)\n # print(classifier.config.id2label[predicted_label])\n return predicted_label, torch.softmax(logits, dim=-1).squeeze()"
},
{
"identifier": "CustomTextEncoder",
"path": "src/tasks/utils/text_encoder.py",
"snippet": "class CustomTextEncoder(torch.nn.Module):\n def __init__(self, text_encoder):\n super().__init__()\n self.text_encoder = text_encoder\n self.text_encoder.eval()\n self.text_encoder.requires_grad_(False)\n self.embedding = text_encoder.text_model.embeddings\n self.encoder = text_encoder.text_model.encoder\n self.final_layer_norm = text_encoder.text_model.final_layer_norm\n self.config = text_encoder.text_model.config\n self.eos_token_id = self.config.eos_token_id\n def get_all_embedding(self):\n return self.embedding.token_embedding.weight\n def forward(\n self,\n input_ids: Optional[torch.Tensor] = None,\n inputs_embeds : Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, BaseModelOutputWithPooling]:\n output_attentions = False\n output_hidden_states = False\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if inputs_embeds is None:\n raise ValueError(\"You have to specify input_embds\")\n if input_ids is None:\n raise ValueError(\"You have to specify input_ids\")\n\n hidden_states = self.embedding(inputs_embeds=inputs_embeds , position_ids=position_ids)\n input_shape = input_ids.size()\n causal_attention_mask = _make_causal_mask(input_shape, hidden_states.dtype, device=hidden_states.device)\n if attention_mask is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n attention_mask = _expand_mask(attention_mask, hidden_states.dtype)\n\n encoder_outputs = self.encoder(\n inputs_embeds=hidden_states,\n attention_mask=attention_mask,\n causal_attention_mask=causal_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n last_hidden_state = encoder_outputs[0]\n last_hidden_state = self.final_layer_norm(last_hidden_state)\n\n if self.eos_token_id == 2:\n # The `eos_token_id` was incorrect before PR #24773: Let's keep what have been done here.\n # A CLIP model with such `eos_token_id` in the config can't work correctly with extra new tokens added\n # ------------------------------------------------------------\n # text_embeds.shape = [batch_size, sequence_length, transformer.width]\n # take features from the eot embedding (eot_token is the highest number in each sequence)\n # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14\n pooled_output = last_hidden_state[\n torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),\n input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1),\n ]\n else:\n # The config gets updated `eos_token_id` from PR #24773 (so the use of exta new tokens is possible)\n pooled_output = last_hidden_state[\n torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),\n # We need to get the first position of `eos_token_id` value (`pad_token_ids` might equal to `eos_token_id`)\n (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id)\n .int()\n .argmax(dim=-1),\n ]\n\n if not return_dict:\n return (last_hidden_state, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPooling(\n last_hidden_state=last_hidden_state,\n pooler_output=pooled_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )"
},
{
"identifier": "get",
"path": "src/tasks/utils/datasets.py",
"snippet": "def get(root_dir):\n return PNGImageDataset(root_dir=root_dir,transform=get_transform()) "
}
] | import os
import torch
import torch.nn.functional as F
from copy import deepcopy
from diffusers import AutoencoderKL, UNet2DConditionModel, LMSDiscreteScheduler
from transformers import CLIPTextModel, CLIPTokenizer
from PIL import Image
from uuid import uuid4
from .utils.metrics.clip_score import calculate_clip_score
from .utils.metrics.nudity_eval import if_nude, detectNudeClasses
from .utils.metrics.harm_eval import q16_binary_classifier, harm_eval
from .utils.metrics.style_eval import style_eval,init_classifier
from .utils.metrics.object_eval import imagenet_ResNet50, object_eval
from .utils.text_encoder import CustomTextEncoder
from .utils.datasets import get as get_dataset
from tqdm.auto import tqdm | 4,886 | self.scheduler.set_timesteps(num_inference_steps)
# Hyperparameters for different SLD Modes
safety_momentum = None
if self.sld == 'weak':
sld_warmup_steps = 15 # SLD warmup steps
sld_guidance_scale = 200 # SLD gudidance Scale
sld_threshold = 0.0 # SLD threshold
sld_momentum_scale = 0.0 # SLD momentum scale
sld_mom_beta = 0.0 # SLD momentum beta
elif self.sld == 'medium':
sld_warmup_steps = 10 # SLD warmup steps
sld_guidance_scale = 1000 # SLD gudidance Scale
sld_threshold = 0.01 # SLD threshold
sld_momentum_scale = 0.3 # SLD momentum scale
sld_mom_beta = 0.4 # SLD momentum beta
elif self.sld == 'strong':
sld_warmup_steps = 7 # SLD warmup steps
sld_guidance_scale = 2000 # SLD gudidance Scale
sld_threshold = 0.025 # SLD threshold
sld_momentum_scale = 0.5 # SLD momentum scale
sld_mom_beta = 0.7 # SLD momentum beta
elif self.sld == 'max':
sld_warmup_steps = 0 # SLD warmup steps
sld_guidance_scale = 5000 # SLD gudidance Scale
sld_threshold = 1.0 # SLD threshold
sld_momentum_scale = 0.5 # SLD momentum scale
sld_mom_beta = 0.7 # SLD momentum beta
for t in tqdm(self.scheduler.timesteps):
# expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
latent_model_input = latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, timestep=t)
# predict the noise residual
with torch.no_grad():
noise_pred_uncond = self.target_unet_sd(latent_model_input, t, encoder_hidden_states=uncond_embeddings).sample
noise_pred_text = self.target_unet_sd(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
# perform guidance
# noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
# Perform SLD guidance
if self.sld is not None:
noise_guidance = noise_pred_text - noise_pred_uncond
with torch.no_grad():
noise_pred_safety_concept = self.target_unet_sd(latent_model_input, t, encoder_hidden_states=safety_embeddings).sample
if safety_momentum is None:
safety_momentum = torch.zeros_like(noise_pred_text)
# Equation 6
scale = torch.clamp(
torch.abs((noise_pred_text - noise_pred_safety_concept)) * sld_guidance_scale, max=1.)
# Equation 6
safety_concept_scale = torch.where(
(noise_pred_text - noise_pred_safety_concept) >= sld_threshold,
torch.zeros_like(scale), scale)
# Equation 4
noise_guidance_safety = torch.mul(
(noise_pred_safety_concept - noise_pred_uncond), safety_concept_scale)
# Equation 7
noise_guidance_safety = noise_guidance_safety + sld_momentum_scale * safety_momentum
# Equation 8
safety_momentum = sld_mom_beta * safety_momentum + (1 - sld_mom_beta) * noise_guidance_safety
if t >= sld_warmup_steps: # Warmup
# Equation 3
noise_guidance = noise_guidance - noise_guidance_safety
noise_pred = noise_pred_uncond + guidance_scale * noise_guidance
else:
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents).prev_sample
# scale and decode the image latents with vae
latents = 1 / 0.18215 * latents
with torch.no_grad():
image = self.vae.decode(latents).sample
image = (image / 2 + 0.5).clamp(0, 1)
image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
images = (image * 255).round().astype("uint8")
return images[0]
def eval(self,input_ids,prompt,seed=0,batch_size=1,height=512,width=512,guidance_scale=7.5):
results = {}
text_input = self.tokenizer(
prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt",truncation=True
)
text_embeddings = self.id2embedding(text_input.input_ids.to(self.device))
image = self.sampling(input_ids,text_embeddings,batch_size=batch_size,seed=seed,height=height,width=width,num_inference_steps=self.sampling_step_num,guidance_scale=guidance_scale)
# results['clipscore'] = calculate_clip_score(image, prompt,self.device)
results['image'] = Image.fromarray(image)
img_tmp_path = os.path.join(self.cache_path, f'{uuid4()}.png')
# store in the local cache for detectNudeClasses
results['image'].save(img_tmp_path)
if self.concept == 'nudity':
results['nude'] = detectNudeClasses([img_tmp_path], threshold=0.)[0]
results['success'] = if_nude(results['nude'], threshold=0.45)
if len(results['nude']) != 0:
results['score'] = max(results['nude'].values())
else:
results['score'] = 0
elif self.concept == 'vangogh':
results['style'] = style_eval(self.classifier,results['image'])[:10]
# results['style'].sort(key=lambda x: x['score'], reverse=True)
# results['score'] = next(filter(lambda x: x['label'] == 'vincent-van-gogh', results['style']))['score']
# results['success'] = 'vincent-van-gogh' in list(map(lambda x: x['label'], results['style'][:10]))
elif self.concept in self.object_list:
|
class ClassifierTask:
def __init__(
self,
concept,
sld,
sld_concept,
negative_prompt,
model_name_or_path,
target_ckpt,
cache_path,
dataset_path,
criterion,
sampling_step_num,
n_samples = 50,
classifier_dir = None,
):
self.object_list = ['cassette_player', 'church', 'english_springer', 'french_horn', 'garbage_truck', 'gas_pump', 'golf_ball', 'parachute', 'tench', "chain_saw"]
self.object_labels = [482, 497, 217, 566, 569, 571, 574, 701, 0, 491]
self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
self.concept = concept
self.sld = sld
self.sld_concept = sld_concept
self.negative_prompt = negative_prompt
self.cache_path = cache_path
self.sampling_step_num = sampling_step_num
self.dataset = get_dataset(dataset_path)
self.criterion = torch.nn.L1Loss() if criterion == 'l1' else torch.nn.MSELoss()
self.vae = AutoencoderKL.from_pretrained(model_name_or_path, subfolder="vae", cache_dir=cache_path).to(self.device)
self.tokenizer = CLIPTokenizer.from_pretrained(model_name_or_path, subfolder="tokenizer", cache_dir=cache_path)
self.text_encoder = CLIPTextModel.from_pretrained(model_name_or_path, subfolder="text_encoder", cache_dir=cache_path).to(self.device)
self.custom_text_encoder = CustomTextEncoder(self.text_encoder).to(self.device)
self.all_embeddings = self.custom_text_encoder.get_all_embedding().unsqueeze(0)
self.unet_sd = UNet2DConditionModel.from_pretrained(model_name_or_path, subfolder="unet", cache_dir=cache_path).to(self.device)
self.target_unet_sd = deepcopy(self.unet_sd)
if self.sld is None:
self.target_unet_sd.load_state_dict(torch.load(target_ckpt, map_location=self.device))
if classifier_dir is not None:
self.classifier = init_classifier(self.device,classifier_dir)
elif self.concept in self.object_list:
self.processor, self.classifier = imagenet_ResNet50(self.device)
elif self.concept == 'harm':
self.clip_model, self.classifier = q16_binary_classifier(self.device)
self.scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
self.T = 1000
self.n_samples = n_samples
start = self.T // self.n_samples // 2
self.sampled_t = list(range(start, self.T, self.T // self.n_samples))[:self.n_samples]
for m in [self.vae, self.text_encoder, self.custom_text_encoder, self.unet_sd, self.target_unet_sd]:
m.eval()
m.requires_grad_(False)
def get_loss(self,x0,t,input_ids,input_embeddings,**kwargs):
x0 = x0.to(self.device)
x0 = x0.repeat(input_embeddings.shape[0], 1, 1, 1)
noise = torch.randn((1, 4, 64, 64), device=self.device)
noise = noise.repeat(input_embeddings.shape[0], 1, 1, 1)
noised_latent = x0 * (self.scheduler.alphas_cumprod[t] ** 0.5).view(-1, 1, 1, 1).to(self.device) + \
noise * ((1 - self.scheduler.alphas_cumprod[t]) ** 0.5).view(-1, 1, 1, 1).to(self.device)
encoder_hidden_states = self.custom_text_encoder(input_ids = input_ids,inputs_embeds=input_embeddings)[0]
noise_pred = self.target_unet_sd(noised_latent,t,encoder_hidden_states=encoder_hidden_states).sample
error = self.criterion(noise,noise_pred)
return error
def str2id(self,prompt):
text_input = self.tokenizer(
prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt",truncation=True
)
return text_input.input_ids.to(self.device)
def img2latent(self,image):
with torch.no_grad():
img_input = image.unsqueeze(0).to(self.device)
x0 = self.vae.encode(img_input).latent_dist.mean
x0 *= 0.18215
return x0
def id2embedding(self,input_ids):
input_one_hot = F.one_hot(input_ids.view(-1), num_classes = len(self.tokenizer.get_vocab())).float()
input_one_hot = torch.unsqueeze(input_one_hot,0).to(self.device)
input_embeds = input_one_hot @ self.all_embeddings
return input_embeds
def sampling(self,input_ids,text_embeddings,batch_size=1,seed=0,height=512,width=512,num_inference_steps=50,guidance_scale=7.5):
text_embeddings = self.custom_text_encoder(input_ids = input_ids, inputs_embeds=text_embeddings)[0]
# Negative prompt or Not
if self.negative_prompt is not None:
uncond_input = self.tokenizer(
[self.negative_prompt] * batch_size, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt"
)
else:
uncond_input = self.tokenizer(
[""] * batch_size, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt"
)
uncond_embeddings = self.id2embedding(uncond_input.input_ids.to(self.device))
uncond_embeddings = self.custom_text_encoder(input_ids = uncond_input.input_ids.to(self.device), inputs_embeds=uncond_embeddings)[0]
# Generate safety embedding for SLD
if self.sld is not None:
if self.sld_concept is not None:
safety_concept = str(self.sld_concept)
else:
safety_concept = 'hate, harassment, violence, suffering, humiliation, harm, suicide, ' \
'sexual, nudity, bodily fluids, blood, obscene gestures, illegal activity, ' \
'drug use, theft, vandalism, weapons, child abuse, brutality, cruelty'
safety_input = self.tokenizer(
[safety_concept] * batch_size, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt"
)
safety_embeddings = self.id2embedding(safety_input.input_ids.to(self.device))
safety_embeddings = self.custom_text_encoder(input_ids = safety_input.input_ids.to(self.device), inputs_embeds=safety_embeddings)[0]
generator = torch.manual_seed(seed)
latents = torch.randn(
(batch_size, self.target_unet_sd.config.in_channels, height // 8, width // 8),
generator=generator,
)
latents = latents.to(self.device)
self.scheduler.set_timesteps(num_inference_steps)
latents = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(num_inference_steps)
# Hyperparameters for different SLD Modes
safety_momentum = None
if self.sld == 'weak':
sld_warmup_steps = 15 # SLD warmup steps
sld_guidance_scale = 200 # SLD gudidance Scale
sld_threshold = 0.0 # SLD threshold
sld_momentum_scale = 0.0 # SLD momentum scale
sld_mom_beta = 0.0 # SLD momentum beta
elif self.sld == 'medium':
sld_warmup_steps = 10 # SLD warmup steps
sld_guidance_scale = 1000 # SLD gudidance Scale
sld_threshold = 0.01 # SLD threshold
sld_momentum_scale = 0.3 # SLD momentum scale
sld_mom_beta = 0.4 # SLD momentum beta
elif self.sld == 'strong':
sld_warmup_steps = 7 # SLD warmup steps
sld_guidance_scale = 2000 # SLD gudidance Scale
sld_threshold = 0.025 # SLD threshold
sld_momentum_scale = 0.5 # SLD momentum scale
sld_mom_beta = 0.7 # SLD momentum beta
elif self.sld == 'max':
sld_warmup_steps = 0 # SLD warmup steps
sld_guidance_scale = 5000 # SLD gudidance Scale
sld_threshold = 1.0 # SLD threshold
sld_momentum_scale = 0.5 # SLD momentum scale
sld_mom_beta = 0.7 # SLD momentum beta
for t in tqdm(self.scheduler.timesteps):
# expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
latent_model_input = latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, timestep=t)
# predict the noise residual
with torch.no_grad():
noise_pred_uncond = self.target_unet_sd(latent_model_input, t, encoder_hidden_states=uncond_embeddings).sample
noise_pred_text = self.target_unet_sd(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
# perform guidance
# noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
# Perform SLD guidance
if self.sld is not None:
noise_guidance = noise_pred_text - noise_pred_uncond
with torch.no_grad():
noise_pred_safety_concept = self.target_unet_sd(latent_model_input, t, encoder_hidden_states=safety_embeddings).sample
if safety_momentum is None:
safety_momentum = torch.zeros_like(noise_pred_text)
# Equation 6
scale = torch.clamp(
torch.abs((noise_pred_text - noise_pred_safety_concept)) * sld_guidance_scale, max=1.)
# Equation 6
safety_concept_scale = torch.where(
(noise_pred_text - noise_pred_safety_concept) >= sld_threshold,
torch.zeros_like(scale), scale)
# Equation 4
noise_guidance_safety = torch.mul(
(noise_pred_safety_concept - noise_pred_uncond), safety_concept_scale)
# Equation 7
noise_guidance_safety = noise_guidance_safety + sld_momentum_scale * safety_momentum
# Equation 8
safety_momentum = sld_mom_beta * safety_momentum + (1 - sld_mom_beta) * noise_guidance_safety
if t >= sld_warmup_steps: # Warmup
# Equation 3
noise_guidance = noise_guidance - noise_guidance_safety
noise_pred = noise_pred_uncond + guidance_scale * noise_guidance
else:
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents).prev_sample
# scale and decode the image latents with vae
latents = 1 / 0.18215 * latents
with torch.no_grad():
image = self.vae.decode(latents).sample
image = (image / 2 + 0.5).clamp(0, 1)
image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
images = (image * 255).round().astype("uint8")
return images[0]
def eval(self,input_ids,prompt,seed=0,batch_size=1,height=512,width=512,guidance_scale=7.5):
results = {}
text_input = self.tokenizer(
prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt",truncation=True
)
text_embeddings = self.id2embedding(text_input.input_ids.to(self.device))
image = self.sampling(input_ids,text_embeddings,batch_size=batch_size,seed=seed,height=height,width=width,num_inference_steps=self.sampling_step_num,guidance_scale=guidance_scale)
# results['clipscore'] = calculate_clip_score(image, prompt,self.device)
results['image'] = Image.fromarray(image)
img_tmp_path = os.path.join(self.cache_path, f'{uuid4()}.png')
# store in the local cache for detectNudeClasses
results['image'].save(img_tmp_path)
if self.concept == 'nudity':
results['nude'] = detectNudeClasses([img_tmp_path], threshold=0.)[0]
results['success'] = if_nude(results['nude'], threshold=0.45)
if len(results['nude']) != 0:
results['score'] = max(results['nude'].values())
else:
results['score'] = 0
elif self.concept == 'vangogh':
results['style'] = style_eval(self.classifier,results['image'])[:10]
# results['style'].sort(key=lambda x: x['score'], reverse=True)
# results['score'] = next(filter(lambda x: x['label'] == 'vincent-van-gogh', results['style']))['score']
# results['success'] = 'vincent-van-gogh' in list(map(lambda x: x['label'], results['style'][:10]))
elif self.concept in self.object_list: | results['object'], logits = object_eval(self.classifier,results['image'], processor=self.processor, device=self.device) | 8 | 2023-10-17 13:54:37+00:00 | 8k |
YefanZhou/TempBalance | object_detection/src/YOLOv8/ultralytics/nn/modules/transformer.py | [
{
"identifier": "Conv",
"path": "object_detection/src/YOLOv8/ultralytics/nn/modules/conv.py",
"snippet": "class Conv(nn.Module):\n \"\"\"Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation).\"\"\"\n default_act = nn.SiLU() # default activation\n\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):\n \"\"\"Initialize Conv layer with given arguments including activation.\"\"\"\n super().__init__()\n self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)\n self.bn = nn.BatchNorm2d(c2)\n self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()\n\n def forward(self, x):\n \"\"\"Apply convolution, batch normalization and activation to input tensor.\"\"\"\n return self.act(self.bn(self.conv(x)))\n\n def forward_fuse(self, x):\n \"\"\"Perform transposed convolution of 2D data.\"\"\"\n return self.act(self.conv(x))"
},
{
"identifier": "_get_clones",
"path": "object_detection/src/YOLOv8/ultralytics/nn/modules/utils.py",
"snippet": "def _get_clones(module, n):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(n)])"
},
{
"identifier": "inverse_sigmoid",
"path": "object_detection/src/YOLOv8/ultralytics/nn/modules/utils.py",
"snippet": "def inverse_sigmoid(x, eps=1e-5):\n x = x.clamp(min=0, max=1)\n x1 = x.clamp(min=eps)\n x2 = (1 - x).clamp(min=eps)\n return torch.log(x1 / x2)"
},
{
"identifier": "multi_scale_deformable_attn_pytorch",
"path": "object_detection/src/YOLOv8/ultralytics/nn/modules/utils.py",
"snippet": "def multi_scale_deformable_attn_pytorch(value: torch.Tensor, value_spatial_shapes: torch.Tensor,\n sampling_locations: torch.Tensor,\n attention_weights: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Multi-scale deformable attention.\n https://github.com/IDEA-Research/detrex/blob/main/detrex/layers/multi_scale_deform_attn.py\n \"\"\"\n\n bs, _, num_heads, embed_dims = value.shape\n _, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape\n value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1)\n sampling_grids = 2 * sampling_locations - 1\n sampling_value_list = []\n for level, (H_, W_) in enumerate(value_spatial_shapes):\n # bs, H_*W_, num_heads, embed_dims ->\n # bs, H_*W_, num_heads*embed_dims ->\n # bs, num_heads*embed_dims, H_*W_ ->\n # bs*num_heads, embed_dims, H_, W_\n value_l_ = (value_list[level].flatten(2).transpose(1, 2).reshape(bs * num_heads, embed_dims, H_, W_))\n # bs, num_queries, num_heads, num_points, 2 ->\n # bs, num_heads, num_queries, num_points, 2 ->\n # bs*num_heads, num_queries, num_points, 2\n sampling_grid_l_ = sampling_grids[:, :, :, level].transpose(1, 2).flatten(0, 1)\n # bs*num_heads, embed_dims, num_queries, num_points\n sampling_value_l_ = F.grid_sample(value_l_,\n sampling_grid_l_,\n mode='bilinear',\n padding_mode='zeros',\n align_corners=False)\n sampling_value_list.append(sampling_value_l_)\n # (bs, num_queries, num_heads, num_levels, num_points) ->\n # (bs, num_heads, num_queries, num_levels, num_points) ->\n # (bs, num_heads, 1, num_queries, num_levels*num_points)\n attention_weights = attention_weights.transpose(1, 2).reshape(bs * num_heads, 1, num_queries,\n num_levels * num_points)\n output = ((torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights).sum(-1).view(\n bs, num_heads * embed_dims, num_queries))\n return output.transpose(1, 2).contiguous()"
}
] | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import constant_, xavier_uniform_
from .conv import Conv
from .utils import _get_clones, inverse_sigmoid, multi_scale_deformable_attn_pytorch | 4,455 | xavier_uniform_(self.value_proj.weight.data)
constant_(self.value_proj.bias.data, 0.)
xavier_uniform_(self.output_proj.weight.data)
constant_(self.output_proj.bias.data, 0.)
def forward(self, query, reference_points, value, value_spatial_shapes, value_mask=None):
"""
https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py
Args:
query (Tensor): [bs, query_length, C]
reference_points (Tensor): [bs, query_length, n_levels, 2], range in [0, 1], top-left (0,0),
bottom-right (1, 1), including padding area
value (Tensor): [bs, value_length, C]
value_spatial_shapes (List): [n_levels, 2], [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
value_mask (Tensor): [bs, value_length], True for non-padding elements, False for padding elements
Returns:
output (Tensor): [bs, Length_{query}, C]
"""
bs, len_q = query.shape[:2]
_, len_v = value.shape[:2]
assert sum(s[0] * s[1] for s in value_spatial_shapes) == len_v
value = self.value_proj(value)
if value_mask is not None:
value = value.masked_fill(value_mask[..., None], float(0))
value = value.view(bs, len_v, self.n_heads, self.d_model // self.n_heads)
sampling_offsets = self.sampling_offsets(query).view(bs, len_q, self.n_heads, self.n_levels, self.n_points, 2)
attention_weights = self.attention_weights(query).view(bs, len_q, self.n_heads, self.n_levels * self.n_points)
attention_weights = F.softmax(attention_weights, -1).view(bs, len_q, self.n_heads, self.n_levels, self.n_points)
# N, Len_q, n_heads, n_levels, n_points, 2
n = reference_points.shape[-1]
if n == 2:
offset_normalizer = torch.as_tensor(value_spatial_shapes, dtype=query.dtype, device=query.device).flip(-1)
add = sampling_offsets / offset_normalizer[None, None, None, :, None, :]
sampling_locations = reference_points[:, :, None, :, None, :] + add
elif n == 4:
add = sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
sampling_locations = reference_points[:, :, None, :, None, :2] + add
else:
raise ValueError(f'Last dim of reference_points must be 2 or 4, but got {n}.')
output = multi_scale_deformable_attn_pytorch(value, value_spatial_shapes, sampling_locations, attention_weights)
output = self.output_proj(output)
return output
class DeformableTransformerDecoderLayer(nn.Module):
"""
https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py
https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/deformable_transformer.py
"""
def __init__(self, d_model=256, n_heads=8, d_ffn=1024, dropout=0., act=nn.ReLU(), n_levels=4, n_points=4):
super().__init__()
# self attention
self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# cross attention
self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
self.dropout2 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
# ffn
self.linear1 = nn.Linear(d_model, d_ffn)
self.act = act
self.dropout3 = nn.Dropout(dropout)
self.linear2 = nn.Linear(d_ffn, d_model)
self.dropout4 = nn.Dropout(dropout)
self.norm3 = nn.LayerNorm(d_model)
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward_ffn(self, tgt):
tgt2 = self.linear2(self.dropout3(self.act(self.linear1(tgt))))
tgt = tgt + self.dropout4(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward(self,
tgt,
reference_points,
src,
src_spatial_shapes,
src_padding_mask=None,
attn_mask=None,
query_pos=None):
# self attention
q = k = self.with_pos_embed(tgt, query_pos)
if attn_mask is not None:
attn_mask = torch.where(attn_mask.astype('bool'), torch.zeros(attn_mask.shape, tgt.dtype),
torch.full(attn_mask.shape, float('-inf'), tgt.dtype))
tgt2 = self.self_attn(q.transpose(0, 1), k.transpose(0, 1), tgt.transpose(0, 1))[0].transpose(0, 1)
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
# cross attention
tgt2 = self.cross_attn(self.with_pos_embed(tgt, query_pos), reference_points, src, src_spatial_shapes,
src_padding_mask)
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
# ffn
tgt = self.forward_ffn(tgt)
return tgt
class DeformableTransformerDecoder(nn.Module):
"""
https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py
"""
def __init__(self, hidden_dim, decoder_layer, num_layers, eval_idx=-1):
super().__init__()
| # Ultralytics YOLO 🚀, AGPL-3.0 license
"""
Transformer modules
"""
__all__ = [
'TransformerEncoderLayer', 'TransformerLayer', 'TransformerBlock', 'MLPBlock', 'LayerNorm2d', 'AIFI',
'DeformableTransformerDecoder', 'DeformableTransformerDecoderLayer', 'MSDeformAttn', 'MLP']
class TransformerEncoderLayer(nn.Module):
"""Transformer Encoder."""
def __init__(self, c1, cm=2048, num_heads=8, dropout=0.0, act=nn.GELU(), normalize_before=False):
super().__init__()
self.ma = nn.MultiheadAttention(c1, num_heads, dropout=dropout, batch_first=True)
# Implementation of Feedforward model
self.fc1 = nn.Linear(c1, cm)
self.fc2 = nn.Linear(cm, c1)
self.norm1 = nn.LayerNorm(c1)
self.norm2 = nn.LayerNorm(c1)
self.dropout = nn.Dropout(dropout)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.act = act
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos=None):
"""Add position embeddings if given."""
return tensor if pos is None else tensor + pos
def forward_post(self, src, src_mask=None, src_key_padding_mask=None, pos=None):
q = k = self.with_pos_embed(src, pos)
src2 = self.ma(q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.fc2(self.dropout(self.act(self.fc1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def forward_pre(self, src, src_mask=None, src_key_padding_mask=None, pos=None):
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2 = self.ma(q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src2 = self.norm2(src)
src2 = self.fc2(self.dropout(self.act(self.fc1(src2))))
src = src + self.dropout2(src2)
return src
def forward(self, src, src_mask=None, src_key_padding_mask=None, pos=None):
"""Forward propagates the input through the encoder module."""
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class AIFI(TransformerEncoderLayer):
def __init__(self, c1, cm=2048, num_heads=8, dropout=0, act=nn.GELU(), normalize_before=False):
super().__init__(c1, cm, num_heads, dropout, act, normalize_before)
def forward(self, x):
c, h, w = x.shape[1:]
pos_embed = self.build_2d_sincos_position_embedding(w, h, c)
# flatten [B, C, H, W] to [B, HxW, C]
x = super().forward(x.flatten(2).permute(0, 2, 1), pos=pos_embed.to(device=x.device, dtype=x.dtype))
return x.permute((0, 2, 1)).view([-1, c, h, w])
@staticmethod
def build_2d_sincos_position_embedding(w, h, embed_dim=256, temperature=10000.):
grid_w = torch.arange(int(w), dtype=torch.float32)
grid_h = torch.arange(int(h), dtype=torch.float32)
grid_w, grid_h = torch.meshgrid(grid_w, grid_h, indexing='ij')
assert embed_dim % 4 == 0, \
'Embed dimension must be divisible by 4 for 2D sin-cos position embedding'
pos_dim = embed_dim // 4
omega = torch.arange(pos_dim, dtype=torch.float32) / pos_dim
omega = 1. / (temperature ** omega)
out_w = grid_w.flatten()[..., None] @ omega[None]
out_h = grid_h.flatten()[..., None] @ omega[None]
return torch.concat([torch.sin(out_w), torch.cos(out_w),
torch.sin(out_h), torch.cos(out_h)], axis=1)[None, :, :]
class TransformerLayer(nn.Module):
"""Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)."""
def __init__(self, c, num_heads):
"""Initializes a self-attention mechanism using linear transformations and multi-head attention."""
super().__init__()
self.q = nn.Linear(c, c, bias=False)
self.k = nn.Linear(c, c, bias=False)
self.v = nn.Linear(c, c, bias=False)
self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
self.fc1 = nn.Linear(c, c, bias=False)
self.fc2 = nn.Linear(c, c, bias=False)
def forward(self, x):
"""Apply a transformer block to the input x and return the output."""
x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
x = self.fc2(self.fc1(x)) + x
return x
class TransformerBlock(nn.Module):
"""Vision Transformer https://arxiv.org/abs/2010.11929."""
def __init__(self, c1, c2, num_heads, num_layers):
"""Initialize a Transformer module with position embedding and specified number of heads and layers."""
super().__init__()
self.conv = None
if c1 != c2:
self.conv = Conv(c1, c2)
self.linear = nn.Linear(c2, c2) # learnable position embedding
self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))
self.c2 = c2
def forward(self, x):
"""Forward propagates the input through the bottleneck module."""
if self.conv is not None:
x = self.conv(x)
b, _, w, h = x.shape
p = x.flatten(2).permute(2, 0, 1)
return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h)
class MLPBlock(nn.Module):
def __init__(self, embedding_dim, mlp_dim, act=nn.GELU):
super().__init__()
self.lin1 = nn.Linear(embedding_dim, mlp_dim)
self.lin2 = nn.Linear(mlp_dim, embedding_dim)
self.act = act()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.lin2(self.act(self.lin1(x)))
class MLP(nn.Module):
""" Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
class LayerNorm2d(nn.Module):
def __init__(self, num_channels, eps=1e-6):
super().__init__()
self.weight = nn.Parameter(torch.ones(num_channels))
self.bias = nn.Parameter(torch.zeros(num_channels))
self.eps = eps
def forward(self, x):
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
class MSDeformAttn(nn.Module):
"""
Original Multi-Scale Deformable Attention Module.
https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/modules/ms_deform_attn.py
"""
def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):
super().__init__()
if d_model % n_heads != 0:
raise ValueError(f'd_model must be divisible by n_heads, but got {d_model} and {n_heads}')
_d_per_head = d_model // n_heads
# you'd better set _d_per_head to a power of 2 which is more efficient in our CUDA implementation
assert _d_per_head * n_heads == d_model, '`d_model` must be divisible by `n_heads`'
self.im2col_step = 64
self.d_model = d_model
self.n_levels = n_levels
self.n_heads = n_heads
self.n_points = n_points
self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)
self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
self.value_proj = nn.Linear(d_model, d_model)
self.output_proj = nn.Linear(d_model, d_model)
self._reset_parameters()
def _reset_parameters(self):
constant_(self.sampling_offsets.weight.data, 0.)
thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.n_heads, 1, 1, 2).repeat(
1, self.n_levels, self.n_points, 1)
for i in range(self.n_points):
grid_init[:, :, i, :] *= i + 1
with torch.no_grad():
self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
constant_(self.attention_weights.weight.data, 0.)
constant_(self.attention_weights.bias.data, 0.)
xavier_uniform_(self.value_proj.weight.data)
constant_(self.value_proj.bias.data, 0.)
xavier_uniform_(self.output_proj.weight.data)
constant_(self.output_proj.bias.data, 0.)
def forward(self, query, reference_points, value, value_spatial_shapes, value_mask=None):
"""
https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py
Args:
query (Tensor): [bs, query_length, C]
reference_points (Tensor): [bs, query_length, n_levels, 2], range in [0, 1], top-left (0,0),
bottom-right (1, 1), including padding area
value (Tensor): [bs, value_length, C]
value_spatial_shapes (List): [n_levels, 2], [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
value_mask (Tensor): [bs, value_length], True for non-padding elements, False for padding elements
Returns:
output (Tensor): [bs, Length_{query}, C]
"""
bs, len_q = query.shape[:2]
_, len_v = value.shape[:2]
assert sum(s[0] * s[1] for s in value_spatial_shapes) == len_v
value = self.value_proj(value)
if value_mask is not None:
value = value.masked_fill(value_mask[..., None], float(0))
value = value.view(bs, len_v, self.n_heads, self.d_model // self.n_heads)
sampling_offsets = self.sampling_offsets(query).view(bs, len_q, self.n_heads, self.n_levels, self.n_points, 2)
attention_weights = self.attention_weights(query).view(bs, len_q, self.n_heads, self.n_levels * self.n_points)
attention_weights = F.softmax(attention_weights, -1).view(bs, len_q, self.n_heads, self.n_levels, self.n_points)
# N, Len_q, n_heads, n_levels, n_points, 2
n = reference_points.shape[-1]
if n == 2:
offset_normalizer = torch.as_tensor(value_spatial_shapes, dtype=query.dtype, device=query.device).flip(-1)
add = sampling_offsets / offset_normalizer[None, None, None, :, None, :]
sampling_locations = reference_points[:, :, None, :, None, :] + add
elif n == 4:
add = sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
sampling_locations = reference_points[:, :, None, :, None, :2] + add
else:
raise ValueError(f'Last dim of reference_points must be 2 or 4, but got {n}.')
output = multi_scale_deformable_attn_pytorch(value, value_spatial_shapes, sampling_locations, attention_weights)
output = self.output_proj(output)
return output
class DeformableTransformerDecoderLayer(nn.Module):
"""
https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py
https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/deformable_transformer.py
"""
def __init__(self, d_model=256, n_heads=8, d_ffn=1024, dropout=0., act=nn.ReLU(), n_levels=4, n_points=4):
super().__init__()
# self attention
self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# cross attention
self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
self.dropout2 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
# ffn
self.linear1 = nn.Linear(d_model, d_ffn)
self.act = act
self.dropout3 = nn.Dropout(dropout)
self.linear2 = nn.Linear(d_ffn, d_model)
self.dropout4 = nn.Dropout(dropout)
self.norm3 = nn.LayerNorm(d_model)
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward_ffn(self, tgt):
tgt2 = self.linear2(self.dropout3(self.act(self.linear1(tgt))))
tgt = tgt + self.dropout4(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward(self,
tgt,
reference_points,
src,
src_spatial_shapes,
src_padding_mask=None,
attn_mask=None,
query_pos=None):
# self attention
q = k = self.with_pos_embed(tgt, query_pos)
if attn_mask is not None:
attn_mask = torch.where(attn_mask.astype('bool'), torch.zeros(attn_mask.shape, tgt.dtype),
torch.full(attn_mask.shape, float('-inf'), tgt.dtype))
tgt2 = self.self_attn(q.transpose(0, 1), k.transpose(0, 1), tgt.transpose(0, 1))[0].transpose(0, 1)
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
# cross attention
tgt2 = self.cross_attn(self.with_pos_embed(tgt, query_pos), reference_points, src, src_spatial_shapes,
src_padding_mask)
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
# ffn
tgt = self.forward_ffn(tgt)
return tgt
class DeformableTransformerDecoder(nn.Module):
"""
https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py
"""
def __init__(self, hidden_dim, decoder_layer, num_layers, eval_idx=-1):
super().__init__() | self.layers = _get_clones(decoder_layer, num_layers) | 1 | 2023-10-24 00:45:55+00:00 | 8k |
zhaojw1998/AccoMontage-3 | train_QA.py | [
{
"identifier": "Query_and_reArrange",
"path": "orchestrator/QA_model.py",
"snippet": "class Query_and_reArrange(nn.Module):\n \"\"\"Q&A model for multi-track rearrangement\"\"\"\n def __init__(self, name, device, trf_layers=2):\n super(Query_and_reArrange, self).__init__()\n\n self.name = name\n self.device = device\n \n # mixture encoder\n self.mixture_enc = PtvaeEncoder(max_simu_note=32, device=self.device, z_size=256)\n\n # track function encoder\n self.function_enc = FunctionEncoder(256, 256, 16)\n\n # feat_dec + pianotree_dec = symbolic decoder\n self.feat_dec = FeatDecoder(z_dim=256) # for key feature reconstruction\n self.feat_emb_layer = nn.Linear(3, 64)\n self.pianotree_dec = PianoTreeDecoder(z_size=256, feat_emb_dim=64, device=device)\n\n self.Transformer_layers = nn.ModuleDict({})\n self.trf_layers = trf_layers\n for idx in range(self.trf_layers):\n self.Transformer_layers[f'layer_{idx}'] = TransformerEncoderLayer(d_model=256, nhead=8, dim_feedforward=1024, dropout=.1, activation=F.gelu, batch_first=True)\n\n self.prog_embedding = nn.Embedding(num_embeddings=35, embedding_dim=256, padding_idx=34)\n\n self.trf_mu = nn.Linear(256, 256)\n self.trf_var = nn.Linear(256, 256)\n\n def run(self, pno_tree_mix, prog, function, pno_tree=None, feat=None, track_pad_mask=None, tfr1=0, tfr2=0, inference=False, mel_id=None):\n \"\"\"\n Forward path of the model in training (w/o computing loss).\n \"\"\"\n\n batch, track, time = function.shape\n max_simu_note = 16\n \n dist_mix, _, _ = self.mixture_enc(pno_tree_mix) \n if inference:\n z_mix = dist_mix.mean\n else:\n z_mix = dist_mix.rsample()\n if track_pad_mask is None:\n track_pad_mask = torch.zeros(batch, track, dtype=bool).to(z_mix.device)\n\n function = function.reshape(-1, 32)\n z_func, cmt_loss, plty = self.function_enc(function, track_pad_mask)\n function_recon = self.function_enc.decoder(z_func).reshape(batch, track, -1)\n\n z_func = z_func.reshape(batch, track, -1) #(batch, track, 256),\n z = torch.cat([\n z_mix.unsqueeze(1), #(batch, 1, 256)\n z_func + self.prog_embedding(prog)],\n dim=1) #z: (batch, track+1, 256)\n\n if not inference:\n trf_mask = torch.cat([torch.zeros(batch, 1, device=z.device).bool(), track_pad_mask], dim=-1) #(batch, track+1)\n else:\n trf_mask = torch.zeros(batch, track+1, device=z.device).bool()\n\n for idx in range(self.trf_layers):\n z = self.Transformer_layers[f'layer_{idx}'](src=z, src_key_padding_mask=trf_mask)\n\n\n z = z[:, 1:].reshape(-1, 256)\n mu = self.trf_mu(z)\n var = self.trf_var(z).exp_()\n\n dist_trf = Normal(mu, var)\n if inference and (mel_id is None):\n z = dist_trf.mean\n elif inference and (mel_id is not None):\n z1 = dist_trf.mean.reshape(batch, track, 256)\n z2 = dist_trf.rsample().reshape(batch, track, 256)\n z = torch.cat([z1[:, :mel_id], z2[:, mel_id: mel_id+1], z1[:, mel_id+1:]], dim=1).reshape(-1, 256)\n else:\n z = dist_trf.rsample()\n\n if not inference:\n feat = feat.reshape(-1, time, 3)\n #reconstruct key feature for self-supervision during training\n recon_feat = self.feat_dec(z, inference, tfr1, feat) #(batch*track, time, 3)\n #embed the reconstructed feature (without applying argmax)\n feat_emb = self.feat_emb_layer(recon_feat)\n\n #prepare the teacher-forcing data for pianotree decoder\n if inference:\n embedded_pno_tree = None\n pno_tree_lgths = None\n else:\n embedded_pno_tree, pno_tree_lgths = self.pianotree_dec.emb_x(pno_tree.reshape(-1, time, max_simu_note, 6))\n\n #pianotree decoder\n recon_pitch, recon_dur = \\\n self.pianotree_dec(z, inference, embedded_pno_tree, pno_tree_lgths, tfr1, tfr2, feat_emb)\n\n recon_pitch = recon_pitch.reshape(batch, track, time, max_simu_note-1, 130)\n recon_dur = recon_dur.reshape(batch, track, time, max_simu_note-1, 5, 2)\n recon_feat = recon_feat.reshape(batch, track, time, 3)\n\n return recon_pitch, recon_dur, recon_feat, \\\n function_recon, \\\n dist_mix, dist_trf, \\\n cmt_loss, plty\n\n def loss_calc(self, pno_tree, feat, function, \n recon_pitch, recon_dur, recon_feat, function_recon,\n dist_mix, dist_trf, cmt_loss, plty, track_pad_mask,\n beta, weights):\n \"\"\" Compute the loss from ground truth and the output of self.run()\"\"\"\n mask = torch.logical_not(track_pad_mask)\n # pianotree recon loss\n pno_tree_l, pitch_l, dur_l = \\\n self.pianotree_dec.recon_loss(pno_tree[mask], \n recon_pitch[mask], \n recon_dur[mask],\n weights)\n # key feature reconstruction loss\n feat_l, onset_feat_l, int_feat_l, center_feat_l = \\\n self.feat_dec.recon_loss(feat[mask], recon_feat[mask])\n\n func_l = self.function_enc.recon_loss(function_recon[mask], function[mask])\n vqvae_l = func_l + cmt_loss\n\n # kl losses\n kl_mix = kl_with_normal(dist_mix)\n kl_trf = kl_with_normal(dist_trf)\n\n kl_l = beta * (kl_mix + kl_trf)\n\n loss = pno_tree_l + feat_l + kl_l + vqvae_l\n\n return loss, pno_tree_l, pitch_l, dur_l, \\\n kl_l, kl_mix, kl_trf, \\\n feat_l, onset_feat_l, int_feat_l, center_feat_l, \\\n vqvae_l, func_l, cmt_loss, plty\n\n def loss(self, pno_tree_mix, prog, function, pno_tree, feat, track_pad_mask, tfr1, tfr2,\n beta=0.01, weights=(1, 0.5)):\n \"\"\"forward and calculate loss\"\"\"\n output = self.run(pno_tree_mix, prog, function, pno_tree, feat, track_pad_mask, tfr1, tfr2)\n return self.loss_calc(pno_tree, feat, function, *output, track_pad_mask, beta, weights)\n \n def output_process(self, recon_pitch, recon_dur):\n grid_recon = torch.cat([recon_pitch.max(-1)[-1].unsqueeze(-1), recon_dur.max(-1)[-1]], dim=-1)\n _, track, _, max_simu_note, grid_dim = grid_recon.shape\n grid_recon = grid_recon.permute(1, 0, 2, 3, 4)\n grid_recon = grid_recon.reshape(track, -1, max_simu_note, grid_dim)\n pr_recon = np.array([grid2pr(matrix) for matrix in grid_recon.detach().cpu().numpy()])\n return pr_recon\n\n def inference(self, pno_tree_mix, prog, function, mel_id=None):\n self.eval()\n with torch.no_grad():\n recon_pitch, recon_dur, _, _, _, _, _, _ = self.run(pno_tree_mix, prog, function, inference=True, mel_id=mel_id)\n pr_recon = self.output_process(recon_pitch, recon_dur)\n return pr_recon\n \n def infer_with_function_codes(self, z_mix, prog, z_func):\n #z_mix: (batch, 256)\n #prog: (batch, track)\n #z_func: (batch, track, 128)\n\n z = torch.cat([ z_mix.unsqueeze(1), #(batch, 1, 256)\n z_func + self.prog_embedding(prog)],\n dim=1) #z: (batch, track+1, 256)\"\"\"\n \n for idx in range(self.trf_layers):\n z = self.Transformer_layers[f'layer_{idx}'](src=z)\n \n z = z[:, 1:].reshape(-1, 256)\n\n mu = self.trf_mu(z)\n var = self.trf_var(z).exp_()\n dist_trf = Normal(mu, var)\n z = dist_trf.mean\n\n recon_feat = self.feat_dec(z, True, 0, None)\n feat_emb = self.feat_emb_layer(recon_feat)\n\n # prepare the teacher-forcing data for pianotree decoder\n embedded_pno_tree = None\n pno_tree_lgths = None\n \n # pianotree decoder\n recon_pitch, recon_dur = \\\n self.pianotree_dec(z, True, embedded_pno_tree, pno_tree_lgths, 0, 0, feat_emb)\n\n recon_pitch = recon_pitch.reshape(*list(prog.shape), 32, 15, 130)\n recon_dur = recon_dur.reshape(*list(prog.shape), 32, 15, 5, 2)\n return recon_pitch, recon_dur\n\n def forward(self, mode, *input, **kwargs):\n if mode in [\"run\", 0]:\n return self.run(*input, **kwargs)\n elif mode in ['loss', 'train', 1]:\n return self.loss(*input, **kwargs)\n elif mode in ['inference', 'eval', 'val', 2]:\n return self.inference(*input, **kwargs)\n else:\n raise NotImplementedError"
},
{
"identifier": "Slakh2100_Pop909_Dataset",
"path": "orchestrator/QA_dataset.py",
"snippet": "class Slakh2100_Pop909_Dataset(Dataset):\n def __init__(self, slakh_dir, pop909_dir, sample_len=SAMPLE_LEN, hop_len=BAR_HOP_LEN, debug_mode=False, split='train', mode='train', with_dynamics=False, merge_pop909=0):\n super(Slakh2100_Pop909_Dataset, self).__init__()\n self.split = split\n self.mode = mode\n self.debug_mode = debug_mode\n\n self.with_dynamics = with_dynamics\n self.merge_pop909 = merge_pop909\n\n self.memory = dict({'tracks': [],\n 'programs': [],\n 'dynamics': [],\n 'dir': []\n })\n self.anchor_list = []\n self.sample_len = sample_len\n \n if slakh_dir is not None:\n print('loading Slakh2100 Dataset ...')\n self.load_data(slakh_dir, sample_len, hop_len)\n if pop909_dir is not None:\n print('loading Pop909 Dataset ...')\n self.load_data(pop909_dir, sample_len, hop_len)\n\n def __len__(self):\n return len(self.anchor_list)\n \n def __getitem__(self, idx):\n song_id, start = self.anchor_list[idx]\n\n if self.mode == 'train': \n tracks_sample = self.memory['tracks'][song_id][:, start: start+self.sample_len]\n program_sample = self.memory['programs'][song_id]\n #delete empty tracks if any\n non_empty = np.nonzero(np.sum(tracks_sample, axis=(1, 2)))[0]\n tracks_sample = tracks_sample[non_empty]\n program_sample = program_sample[non_empty]\n\n elif (self.mode == 'test') or (self.mode == 'inference'): \n tracks_sample = self.memory['tracks'][song_id][:, start:]\n program_sample = self.memory['programs'][song_id]\n\n if ((len(program_sample) <= 3) and (program_sample == 0).all()):\n #merge pop909 into a single piano track at certain probability\n if np.random.rand() < self.merge_pop909: \n tracks_sample = np.max(tracks_sample, axis=0, keepdims=True)\n program_sample = np.array([0])\n\n if self.with_dynamics:\n dynamics = self.memory['dynamics'][song_id][:, start: start+self.sample_len]\n else: \n dynamics = None\n \n return tracks_sample, program_sample, dynamics, self.memory['dir'][song_id]\n\n\n def slakh_program_mapping(self, programs):\n return np.array([EMBED_PROGRAM_MAPPING[SLAKH_PROGRAM_MAPPING[program]] for program in programs])\n\n\n def load_data(self, data_dir, sample_len, hop_len):\n song_list = [os.path.join(data_dir, self.split, item) for item in os.listdir(os.path.join(data_dir, self.split))]\n if self.debug_mode:\n song_list = song_list[: 10]\n for song_dir in tqdm(song_list):\n song_data = np.load(song_dir)\n tracks = song_data['tracks'] #(n_track, time, 128)\n if 'programs' in song_data:\n programs = song_data['programs'] #(n_track, )\n else:\n programs = np.array([0]*len(tracks))\n\n center_pitch = compute_center_pitch(tracks)\n pitch_sort = np.argsort(center_pitch)[::-1]\n tracks = tracks[pitch_sort]\n programs = programs[pitch_sort]\n\n \"\"\"clipping\"\"\" \n if self.mode == 'train':\n if self.split =='validation':\n # during model training, no overlapping for validation set\n for i in range(0, tracks.shape[1], sample_len):\n if i + sample_len >= tracks.shape[1]:\n break\n self.anchor_list.append((len(self.memory['tracks']), i)) #(song_id, start, total_length)\n else:\n # otherwise, hop size is 1-bar\n downbeats = np.nonzero(song_data['db_indicator'])[0]\n for i in range(0, len(downbeats), hop_len):\n if downbeats[i] + sample_len >= tracks.shape[1]:\n break\n self.anchor_list.append((len(self.memory['tracks']), downbeats[i])) #(song_id, start)\n\n elif (self.mode == 'test') or (self.mode == 'inference'):\n start = np.nonzero(song_data['db_indicator'])[0][0]\n end = start + (tracks.shape[1] - start) // sample_len * sample_len\n if end < tracks.shape[1]:\n pad_len = end + sample_len - tracks.shape[1]\n end += sample_len\n tracks = np.pad(tracks, ((0, 0), (0, pad_len), (0, 0)), mode='constant', constant_values=(0,))\n tracks = tracks[:, start: end]\n self.anchor_list.append((len(self.memory['tracks']), start))\n\n self.memory['tracks'].append(tracks)\n self.memory['programs'].append(self.slakh_program_mapping(programs))\n self.memory['dir'].append(song_dir)\n\n if self.with_dynamics:\n self.memory['dynamics'].append(song_data['dynamics'])"
},
{
"identifier": "collate_fn",
"path": "orchestrator/QA_dataset.py",
"snippet": "def collate_fn(batch, device, pitch_shift=True):\n #print(batch)\n max_tracks = max([max(len(item[0]), 1) for item in batch])\n\n tracks = [] \n mixture = []\n instrument = []\n aux_feature = []\n mask = [] #track-wise pad mask\n function = []\n\n if pitch_shift:\n aug_p = AUG_P / AUG_P.sum()\n aug_shift = np.random.choice(np.arange(-6, 6), 1, p=aug_p)[0]\n else:\n aug_shift = 0\n\n for pr, programs, _, _ in batch:\n pr = pr_mat_pitch_shift(pr, aug_shift)\n aux, _, func = compute_pr_feat(pr)\n mask.append([0]*len(pr) + [1]*(max_tracks-len(pr)))\n\n pr = np.pad(pr, ((0, max_tracks-len(pr)), (0, 0), (0, 0)), mode='constant', constant_values=(0,))\n programs = np.pad(programs, (0, max_tracks-len(programs)), mode='constant', constant_values=(NUM_INSTR_CLASS,))\n aux = np.pad(aux, ((0, max_tracks-len(aux)), (0, 0), (0, 0)), mode='constant', constant_values=(0,))\n func = np.pad(func, ((0, max_tracks-len(func)), (0, 0)), mode='constant', constant_values=(0,))\n\n mix = pr2grid(np.max(pr, axis=0), max_note_count=32)\n grid = np.array([pr2grid(matrix) for matrix in pr])\n\n tracks.append(grid)\n mixture.append(mix)\n instrument.append(programs)\n aux_feature.append(aux)\n function.append(func)\n\n return torch.from_numpy(np.array(mixture)).long().to(device), \\\n torch.from_numpy(np.array(instrument)).to(device), \\\n torch.from_numpy(np.array(function)).float().to(device),\\\n torch.from_numpy(np.array(tracks)).long().to(device), \\\n torch.from_numpy(np.array(aux_feature)).float().to(device), \\\n torch.BoolTensor(mask).to(device)"
},
{
"identifier": "MinExponentialLR",
"path": "orchestrator/utils/scheduler.py",
"snippet": "class MinExponentialLR(ExponentialLR):\n def __init__(self, optimizer, gamma, minimum, last_epoch=-1):\n self.min = minimum\n super(MinExponentialLR, self).__init__(optimizer, gamma, last_epoch=-1)\n\n def get_lr(self):\n return [\n max(base_lr * self.gamma ** self.last_epoch, self.min)\n for base_lr in self.base_lrs\n ]"
},
{
"identifier": "OptimizerScheduler",
"path": "orchestrator/utils/scheduler.py",
"snippet": "class OptimizerScheduler(_Scheduler):\n\n def __init__(self, optimizer, scheduler, clip, step=0):\n # optimizer and scheduler are pytorch class\n super(OptimizerScheduler, self).__init__(step)\n self.optimizer = optimizer\n self.scheduler = scheduler\n self.clip = clip\n\n def optimizer_zero_grad(self):\n self.optimizer.zero_grad()\n\n def step(self, require_zero_grad=False):\n self.optimizer.step()\n if self.scheduler is not None:\n self.scheduler.step()\n if require_zero_grad:\n self.optimizer_zero_grad()\n self._update_step()"
},
{
"identifier": "TeacherForcingScheduler",
"path": "orchestrator/utils/scheduler.py",
"snippet": "class TeacherForcingScheduler(_Scheduler):\n\n def __init__(self, high, low, scaler, f=scheduled_sampling, step=0):\n super(TeacherForcingScheduler, self).__init__(step)\n self.high = high\n self.low = low\n self._step = step\n self.scaler = scaler\n self.schedule_f = f\n\n def get_tfr(self):\n return self.schedule_f(self._step/self.scaler, self.high, self.low)\n\n def step(self):\n tfr = self.get_tfr()\n self._update_step()\n return tfr"
},
{
"identifier": "ConstantScheduler",
"path": "orchestrator/utils/scheduler.py",
"snippet": "class ConstantScheduler(_Scheduler):\n\n def __init__(self, param, step=0.):\n super(ConstantScheduler, self).__init__(step)\n self.param = param\n\n def step(self, scaler=None):\n self._update_step()\n return self.param"
},
{
"identifier": "ParameterScheduler",
"path": "orchestrator/utils/scheduler.py",
"snippet": "class ParameterScheduler(_Scheduler):\n\n def __init__(self, step=0, mode='train', **schedulers):\n # optimizer and scheduler are pytorch class\n super(ParameterScheduler, self).__init__(step)\n self.schedulers = schedulers\n self.mode = mode\n\n def train(self):\n self.mode = 'train'\n for scheduler in self.schedulers.values():\n scheduler.train()\n\n def eval(self):\n self.mode = 'val'\n for scheduler in self.schedulers.values():\n scheduler.eval()\n\n def step(self):\n params_dic = {}\n for key, scheduler in self.schedulers.items():\n params_dic[key] = scheduler.step()\n return params_dic"
},
{
"identifier": "kl_anealing",
"path": "orchestrator/utils/training.py",
"snippet": "def kl_anealing(i, high=0.1, low=0., scaler=None):\n hh = 1 - low\n ll = 1 - high\n x = 10 * (i - 0.5)\n z = 1 / (1 + np.exp(x))\n y = (hh - ll) * z + ll\n return 1 - y"
},
{
"identifier": "SummaryWriters",
"path": "orchestrator/utils/training.py",
"snippet": "class SummaryWriters:\n\n def __init__(self, writer_names, tags, log_path, tasks=('train', 'val')):\n # writer_names example: ['loss', 'kl_loss', 'recon_loss']\n # tags example: {'name1': None, 'name2': (0, 1)}\n self.log_path = log_path\n #assert 'loss' == writer_names[0]\n self.writer_names = writer_names\n self.tags = tags\n self._regularize_tags()\n\n writer_dic = {}\n for name in writer_names:\n writer_dic[name] = SummaryWriter(os.path.join(log_path, name))\n self.writers = writer_dic\n\n all_tags = {}\n for task in tasks:\n task_dic = {}\n for key, val in self.tags.items():\n task_dic['_'.join([task, key])] = val\n all_tags[task] = task_dic\n self.all_tags = all_tags\n\n def _init_summary_writer(self):\n tags = {'batch_train': (0, 1, 2, 3, 4)}\n self.summary_writers = SummaryWriters(self.writer_names, tags,\n self.writer_path)\n\n def _regularize_tags(self):\n for key, val in self.tags.items():\n if val is None:\n self.tags[key] = tuple(range(len(self.writer_names)))\n\n def single_write(self, name, tag, val, step):\n self.writers[name].add_scalar(tag, val, step)\n\n def write_tag(self, task, tag, vals, step):\n assert len(vals) == len(self.all_tags[task][tag])\n for name_id, val in zip(self.all_tags[task][tag], vals):\n name = self.writer_names[name_id]\n self.single_write(name, tag, val, step)\n\n def write_task(self, task, vals_dic, step):\n for tag, name_ids in self.all_tags[task].items():\n vals = [vals_dic[self.writer_names[i]] for i in name_ids]\n self.write_tag(task, tag, vals, step)"
},
{
"identifier": "LogPathManager",
"path": "orchestrator/utils/training.py",
"snippet": "class LogPathManager:\n\n def __init__(self, readme_fn=None, save_root='.', log_path_name='result',\n with_date=True, with_time=True,\n writer_folder='writers', model_folder='models'):\n date = str(datetime.date.today()) if with_date else ''\n ctime = datetime.datetime.now().time().strftime(\"%H%M%S\") \\\n if with_time else ''\n log_folder = '_'.join([date, ctime, log_path_name])\n log_path = os.path.join(save_root, log_folder)\n writer_path = os.path.join(log_path, writer_folder)\n model_path = os.path.join(log_path, model_folder)\n self.log_path = log_path\n self.writer_path = writer_path\n self.model_path = model_path\n LogPathManager.create_path(log_path)\n LogPathManager.create_path(writer_path)\n LogPathManager.create_path(model_path)\n if readme_fn is not None:\n shutil.copyfile(readme_fn, os.path.join(log_path, 'readme.txt'))\n\n @staticmethod\n def create_path(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n def epoch_model_path(self, model_name):\n model_fn = join_fn(model_name, 'epoch', ext='pt')\n return os.path.join(self.model_path, model_fn)\n\n def valid_model_path(self, model_name):\n model_fn = join_fn(model_name, 'valid', ext='pt')\n return os.path.join(self.model_path, model_fn)\n\n def final_model_path(self, model_name):\n model_fn = join_fn(model_name, 'final', ext='pt')\n return os.path.join(self.model_path, model_fn)"
},
{
"identifier": "epoch_time",
"path": "orchestrator/utils/training.py",
"snippet": "def epoch_time(start_time, end_time):\n elapsed_time = end_time - start_time\n elapsed_mins = int(elapsed_time / 60)\n elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n return elapsed_mins, elapsed_secs"
}
] | import os
import time
import torch
from torch import optim
from orchestrator.QA_model import Query_and_reArrange
from orchestrator.QA_dataset import Slakh2100_Pop909_Dataset, collate_fn
from torch.utils.data import DataLoader
from orchestrator.utils.scheduler import MinExponentialLR, OptimizerScheduler, TeacherForcingScheduler, ConstantScheduler, ParameterScheduler
from orchestrator.utils.training import kl_anealing, SummaryWriters, LogPathManager, epoch_time
from tqdm import tqdm | 6,288 | os.environ['CUDA_VISIBLE_DEVICES']= '0'
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
DEVICE = 'cuda:0'
BATCH_SIZE = 64
TRF_LAYERS = 2
N_EPOCH = 30
CLIP = 3
WEIGHTS = [1, 0.5]
BETA = 1e-2
TFR = [(0.6, 0), (0.5, 0)]
LR = 1e-3
MODEL_NAME = 'VQ-Q&A-T'
SAVE_ROOT = '/data1/zhaojw/AccoMontage3/'
DEBUG = 0
| os.environ['CUDA_VISIBLE_DEVICES']= '0'
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
DEVICE = 'cuda:0'
BATCH_SIZE = 64
TRF_LAYERS = 2
N_EPOCH = 30
CLIP = 3
WEIGHTS = [1, 0.5]
BETA = 1e-2
TFR = [(0.6, 0), (0.5, 0)]
LR = 1e-3
MODEL_NAME = 'VQ-Q&A-T'
SAVE_ROOT = '/data1/zhaojw/AccoMontage3/'
DEBUG = 0
| model = Query_and_reArrange(name=MODEL_NAME, trf_layers=TRF_LAYERS, device=DEVICE) | 0 | 2023-10-23 12:36:57+00:00 | 8k |
zcczhang/UVD | uvd/envs/evaluator/vec_envs/vec_env.py | [
{
"identifier": "EnvWorker",
"path": "uvd/envs/evaluator/vec_envs/workers.py",
"snippet": "class EnvWorker(ABC):\n \"\"\"An abstract worker for an environment.\"\"\"\n\n def __init__(self, env_fn: Callable[[], gym.Env]) -> None:\n self._env_fn = env_fn\n self.is_closed = False\n self.result: Union[\n Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray], np.ndarray\n ]\n self.action_space = self.get_env_attr(\"action_space\") # noqa: B009\n self.is_reset = False\n\n @abstractmethod\n def get_env_attr(self, key: str) -> Any:\n pass\n\n @abstractmethod\n def set_env_attr(self, key: str, value: Any) -> None:\n pass\n\n def send(self, action: Optional[np.ndarray]) -> None:\n \"\"\"Send action signal to low-level worker.\n\n When action is None, it indicates sending \"reset\" signal;\n otherwise it indicates \"step\" signal. The paired return value\n from \"recv\" function is determined by such kind of different\n signal.\n \"\"\"\n if hasattr(self, \"send_action\"):\n deprecation(\n \"send_action will soon be deprecated. \"\n \"Please use send and recv for your own EnvWorker.\"\n )\n if action is None:\n self.is_reset = True\n self.result = self.reset()\n else:\n self.is_reset = False\n self.send_action(action) # type: ignore\n\n def recv(\n self,\n ) -> Union[Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray], np.ndarray]:\n \"\"\"Receive result from low-level worker.\n\n If the last \"send\" function sends a NULL action, it only returns\n a single observation; otherwise it returns a tuple of (obs, rew,\n done, info).\n \"\"\"\n if hasattr(self, \"get_result\"):\n deprecation(\n \"get_result will soon be deprecated. \"\n \"Please use send and recv for your own EnvWorker.\"\n )\n if not self.is_reset:\n self.result = self.get_result() # type: ignore\n return self.result\n\n def reset(self) -> np.ndarray:\n self.send(None)\n return self.recv() # type: ignore\n\n def step(\n self, action: np.ndarray\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"Perform one timestep of the environment's dynamic.\n\n \"send\" and \"recv\" are coupled in sync simulation, so users only\n call \"step\" function. But they can be called separately in async\n simulation, i.e. someone calls \"send\" first, and calls \"recv\"\n later.\n \"\"\"\n self.send(action)\n return self.recv() # type: ignore\n\n @staticmethod\n def wait(\n workers: List[\"EnvWorker\"], wait_num: int, timeout: Optional[float] = None\n ) -> List[\"EnvWorker\"]:\n \"\"\"Given a list of workers, return those ready ones.\"\"\"\n raise NotImplementedError\n\n def seed(self, seed: Optional[int] = None) -> Optional[List[int]]:\n return self.action_space.seed(seed) # issue 299\n\n @abstractmethod\n def render(self, **kwargs: Any) -> Any:\n \"\"\"Render the environment.\"\"\"\n pass\n\n @abstractmethod\n def close_env(self) -> None:\n pass\n\n def close(self) -> None:\n if self.is_closed:\n return None\n self.is_closed = True\n self.close_env()"
},
{
"identifier": "RayEnvWorker",
"path": "uvd/envs/evaluator/vec_envs/workers.py",
"snippet": "class RayEnvWorker(EnvWorker):\n \"\"\"Ray worker used in RayVectorEnv.\"\"\"\n\n def __init__(self, env_fn: Callable[[], gym.Env]) -> None:\n self.env = (\n ray.remote(_SetAttrWrapper)\n .options(num_cpus=0) # type: ignore\n .remote(env_fn())\n )\n super().__init__(env_fn)\n\n def get_env_attr(self, key: str) -> Any:\n return ray.get(self.env.get_env_attr.remote(key))\n\n def set_env_attr(self, key: str, value: Any) -> None:\n ray.get(self.env.set_env_attr.remote(key, value))\n\n def reset(self) -> Any:\n return ray.get(self.env.reset.remote())\n\n @staticmethod\n def wait( # type: ignore\n workers: List[\"RayEnvWorker\"], wait_num: int, timeout: Optional[float] = None\n ) -> List[\"RayEnvWorker\"]:\n results = [x.result for x in workers]\n ready_results, _ = ray.wait(results, num_returns=wait_num, timeout=timeout)\n return [workers[results.index(result)] for result in ready_results]\n\n def send(self, action: Optional[np.ndarray]) -> None:\n # self.action is actually a handle\n if action is None:\n self.result = self.env.reset.remote()\n else:\n self.result = self.env.step.remote(action)\n\n def recv(\n self,\n ) -> Union[Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray], np.ndarray]:\n return ray.get(self.result) # type: ignore\n\n def seed(self, seed: Optional[int] = None) -> List[int]:\n super().seed(seed)\n return ray.get(self.env.seed.remote(seed))\n\n def render(self, **kwargs: Any) -> Any:\n return ray.get(self.env.render.remote(**kwargs))\n\n def close_env(self) -> None:\n ray.get(self.env.close.remote())"
},
{
"identifier": "SubprocEnvWorker",
"path": "uvd/envs/evaluator/vec_envs/workers.py",
"snippet": "class SubprocEnvWorker(EnvWorker):\n \"\"\"Subprocess worker used in SubprocVectorEnv and ShmemVectorEnv.\"\"\"\n\n def __init__(\n self, env_fn: Callable[[], gym.Env], share_memory: bool = False\n ) -> None:\n self.parent_remote, self.child_remote = Pipe()\n self.share_memory = share_memory\n self.buffer: Optional[Union[dict, tuple, ShArray]] = None\n if self.share_memory:\n dummy = env_fn()\n obs_space = dummy.observation_space\n dummy.close()\n del dummy\n self.buffer = _setup_buf(obs_space)\n args = (\n self.parent_remote,\n self.child_remote,\n CloudpickleWrapper(env_fn),\n self.buffer,\n )\n self.process = Process(target=_worker, args=args, daemon=True)\n self.process.start()\n self.child_remote.close()\n self.is_reset = False\n super().__init__(env_fn)\n\n def get_env_attr(self, key: str) -> Any:\n self.parent_remote.send([\"getattr\", key])\n return self.parent_remote.recv()\n\n def set_env_attr(self, key: str, value: Any) -> None:\n self.parent_remote.send([\"setattr\", {\"key\": key, \"value\": value}])\n\n def _decode_obs(self) -> Union[dict, tuple, np.ndarray]:\n def decode_obs(\n buffer: Optional[Union[dict, tuple, ShArray]]\n ) -> Union[dict, tuple, np.ndarray]:\n if isinstance(buffer, ShArray):\n return buffer.get()\n elif isinstance(buffer, tuple):\n return tuple([decode_obs(b) for b in buffer])\n elif isinstance(buffer, dict):\n return {k: decode_obs(v) for k, v in buffer.items()}\n else:\n raise NotImplementedError\n\n return decode_obs(self.buffer)\n\n @staticmethod\n def wait( # type: ignore\n workers: List[\"SubprocEnvWorker\"],\n wait_num: int,\n timeout: Optional[float] = None,\n ) -> List[\"SubprocEnvWorker\"]:\n remain_conns = conns = [x.parent_remote for x in workers]\n ready_conns: List[connection.Connection] = []\n remain_time, t1 = timeout, time.time()\n while len(remain_conns) > 0 and len(ready_conns) < wait_num:\n if timeout:\n remain_time = timeout - (time.time() - t1)\n if remain_time <= 0:\n break\n # connection.wait hangs if the list is empty\n new_ready_conns = connection.wait(remain_conns, timeout=remain_time)\n ready_conns.extend(new_ready_conns) # type: ignore\n remain_conns = [conn for conn in remain_conns if conn not in ready_conns]\n return [workers[conns.index(con)] for con in ready_conns]\n\n def send(self, action: Optional[np.ndarray]) -> None:\n self.parent_remote.send([\"step\", action])\n\n def recv(\n self,\n ) -> Union[Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray], np.ndarray]:\n result = self.parent_remote.recv()\n if isinstance(result, tuple):\n obs, rew, done, info = result\n if self.share_memory:\n obs = self._decode_obs()\n return obs, rew, done, info\n else:\n obs = result\n if self.share_memory:\n obs = self._decode_obs()\n return obs\n\n def seed(self, seed: Optional[int] = None) -> Optional[List[int]]:\n super().seed(seed)\n self.parent_remote.send([\"seed\", seed])\n return self.parent_remote.recv()\n\n def render(self, **kwargs: Any) -> Any:\n self.parent_remote.send([\"render\", kwargs])\n return self.parent_remote.recv()\n\n def close_env(self) -> None:\n try:\n self.parent_remote.send([\"close\", None])\n # mp may be deleted so it may raise AttributeError\n self.parent_remote.recv()\n self.process.join()\n except (BrokenPipeError, EOFError, AttributeError):\n pass\n # ensure the subproc is terminated\n self.process.terminate()"
}
] | from typing import Any, Callable, List, Optional, Tuple, Union
from .workers import EnvWorker, RayEnvWorker, SubprocEnvWorker
import gym
import numpy as np
import ray | 5,043 | ready_conns: List[EnvWorker] = []
while not ready_conns:
ready_conns = self.worker_class.wait(
self.waiting_conn, self.wait_num, self.timeout
)
result = []
for conn in ready_conns:
waiting_index = self.waiting_conn.index(conn)
self.waiting_conn.pop(waiting_index)
env_id = self.waiting_id.pop(waiting_index)
obs, rew, done, info = conn.recv()
info["env_id"] = env_id
result.append((obs, rew, done, info))
self.ready_id.append(env_id)
obs_list, rew_list, done_list, info_list = zip(*result)
try:
obs_stack = np.stack(obs_list)
except ValueError: # different len(obs)
obs_stack = np.array(obs_list, dtype=object)
rew_stack, done_stack, info_stack = map(
np.stack, [rew_list, done_list, info_list]
)
return obs_stack, rew_stack, done_stack, info_stack
def seed(
self,
seed: Optional[Union[int, List[int]]] = None,
) -> List[Optional[List[int]]]:
"""Set the seed for all environments.
Accept ``None``, an int (which will extend ``i`` to
``[i, i + 1, i + 2, ...]``) or a list.
:return: The list of seeds used in this env's random number generators.
The first value in the list should be the "main" seed, or the value
which a reproducer pass to "seed".
"""
self._assert_is_not_closed()
seed_list: Union[List[None], List[int]]
if seed is None:
seed_list = [seed] * self.env_num
elif isinstance(seed, int):
seed_list = [seed + i for i in range(self.env_num)]
else:
seed_list = seed
return [w.seed(s) for w, s in zip(self.workers, seed_list)]
def render(self, **kwargs: Any) -> List[Any]:
"""Render all of the environments."""
self._assert_is_not_closed()
if self.is_async and len(self.waiting_id) > 0:
raise RuntimeError(
f"Environments {self.waiting_id} are still stepping, cannot "
"render them now."
)
return [w.render(**kwargs) for w in self.workers]
def close(self) -> None:
"""Close all of the environments.
This function will be called only once (if not, it will be
called during garbage collected). This way, ``close`` of all
workers can be assured.
"""
self._assert_is_not_closed()
for w in self.workers:
w.close()
self.is_closed = True
class SubprocVectorEnv(BaseVectorEnv):
"""Vectorized environment wrapper based on subprocess.
.. seealso::
Please refer to :class:`~tianshou.env.BaseVectorEnv` for other APIs' usage.
"""
def __init__(self, env_fns: List[Callable[[], gym.Env]], **kwargs: Any) -> None:
def worker_fn(fn: Callable[[], gym.Env]) -> SubprocEnvWorker:
return SubprocEnvWorker(fn, share_memory=False)
super().__init__(env_fns, worker_fn, **kwargs)
class ShmemVectorEnv(BaseVectorEnv):
"""Optimized SubprocVectorEnv with shared buffers to exchange observations.
ShmemVectorEnv has exactly the same API as SubprocVectorEnv.
.. seealso::
Please refer to :class:`~tianshou.env.BaseVectorEnv` for other APIs' usage.
"""
def __init__(self, env_fns: List[Callable[[], gym.Env]], **kwargs: Any) -> None:
def worker_fn(fn: Callable[[], gym.Env]) -> SubprocEnvWorker:
return SubprocEnvWorker(fn, share_memory=True)
super().__init__(env_fns, worker_fn, **kwargs)
class RayVectorEnv(BaseVectorEnv):
"""Vectorized environment wrapper based on ray.
This is a choice to run distributed environments in a cluster.
.. seealso::
Please refer to :class:`~tianshou.env.BaseVectorEnv` for other APIs' usage.
"""
def __init__(self, env_fns: List[Callable[[], gym.Env]], **kwargs: Any) -> None:
try:
except ImportError as exception:
raise ImportError(
"Please install ray to support RayVectorEnv: pip install ray"
) from exception
if not ray.is_initialized():
ray.init()
| """Modified from `tianshou`"""
GYM_RESERVED_KEYS = [
"metadata",
"reward_range",
"spec",
"action_space",
"observation_space",
]
class BaseVectorEnv(object):
"""Base class for vectorized environments.
Usage:
::
env_num = 8
envs = DummyVectorEnv([lambda: gym.make(task) for _ in range(env_num)])
assert len(envs) == env_num
It accepts a list of environment generators. In other words, an environment
generator ``efn`` of a specific task means that ``efn()`` returns the
environment of the given task, for example, ``gym.make(task)``.
All of the VectorEnv must inherit :class:`~tianshou.env.BaseVectorEnv`.
Here are some other usages:
::
envs.seed(2) # which is equal to the next line
envs.seed([2, 3, 4, 5, 6, 7, 8, 9]) # set specific seed for each env
obs = envs.reset() # reset all environments
obs = envs.reset([0, 5, 7]) # reset 3 specific environments
obs, rew, done, info = envs.step([1] * 8) # step synchronously
envs.render() # render all environments
envs.close() # close all environments
.. warning::
If you use your own environment, please make sure the ``seed`` method
is set up properly, e.g.,
::
def seed(self, seed):
np.random.seed(seed)
Otherwise, the outputs of these envs may be the same with each other.
:param env_fns: a list of callable envs, ``env_fns[i]()`` generates the i-th env.
:param worker_fn: a callable worker, ``worker_fn(env_fns[i])`` generates a
worker which contains the i-th env.
:param int wait_num: use in asynchronous simulation if the time cost of
``env.step`` varies with time and synchronously waiting for all
environments to finish a step is time-wasting. In that case, we can
return when ``wait_num`` environments finish a step and keep on
simulation in these environments. If ``None``, asynchronous simulation
is disabled; else, ``1 <= wait_num <= env_num``.
:param float timeout: use in asynchronous simulation same as above, in each
vectorized step it only deal with those environments spending time
within ``timeout`` seconds.
"""
def __init__(
self,
env_fns: List[Callable[[], gym.Env]],
worker_fn: Callable[[Callable[[], gym.Env]], EnvWorker],
wait_num: Optional[int] = None,
timeout: Optional[float] = None,
) -> None:
self._env_fns = env_fns
# A VectorEnv contains a pool of EnvWorkers, which corresponds to
# interact with the given envs (one worker <-> one env).
self.workers = [worker_fn(fn) for fn in env_fns]
self.worker_class = type(self.workers[0])
assert issubclass(self.worker_class, EnvWorker)
assert all([isinstance(w, self.worker_class) for w in self.workers])
self.env_num = len(env_fns)
self.wait_num = wait_num or len(env_fns)
assert (
1 <= self.wait_num <= len(env_fns)
), f"wait_num should be in [1, {len(env_fns)}], but got {wait_num}"
self.timeout = timeout
assert (
self.timeout is None or self.timeout > 0
), f"timeout is {timeout}, it should be positive if provided!"
self.is_async = self.wait_num != len(env_fns) or timeout is not None
self.waiting_conn: List[EnvWorker] = []
# environments in self.ready_id is actually ready
# but environments in self.waiting_id are just waiting when checked,
# and they may be ready now, but this is not known until we check it
# in the step() function
self.waiting_id: List[int] = []
# all environments are ready in the beginning
self.ready_id = list(range(self.env_num))
self.is_closed = False
def _assert_is_not_closed(self) -> None:
assert (
not self.is_closed
), f"Methods of {self.__class__.__name__} cannot be called after close."
def __len__(self) -> int:
"""Return len(self), which is the number of environments."""
return self.env_num
def __getattribute__(self, key: str) -> Any:
"""Switch the attribute getter depending on the key.
Any class who inherits ``gym.Env`` will inherit some attributes,
like ``action_space``. However, we would like the attribute
lookup to go straight into the worker (in fact, this vector
env's action_space is always None).
"""
if key in GYM_RESERVED_KEYS: # reserved keys in gym.Env
return self.get_env_attr(key)
else:
return super().__getattribute__(key)
def get_env_attr(
self,
key: str,
id: Optional[Union[int, List[int], np.ndarray]] = None,
) -> List[Any]:
"""Get an attribute from the underlying environments.
If id is an int, retrieve the attribute denoted by key from the
environment underlying the worker at index id. The result is
returned as a list with one element. Otherwise, retrieve the
attribute for all workers at indices id and return a list that
is ordered correspondingly to id.
:param str key: The key of the desired attribute.
:param id: Indice(s) of the desired worker(s). Default to None
for all env_id.
:return list: The list of environment attributes.
"""
self._assert_is_not_closed()
id = self._wrap_id(id)
if self.is_async:
self._assert_id(id)
return [self.workers[j].get_env_attr(key) for j in id]
def set_env_attr(
self,
key: str,
value: Any,
id: Optional[Union[int, List[int], np.ndarray]] = None,
diff_value: bool = False,
) -> None:
"""Set an attribute in the underlying environments.
If id is an int, set the attribute denoted by key from the
environment underlying the worker at index id to value.
Otherwise, set the attribute for all workers at indices id.
:param str key: The key of the desired attribute.
:param Any value: The new value of the attribute.
:param id: Indice(s) of the desired worker(s). Default to None
for all env_id.
"""
self._assert_is_not_closed()
id = self._wrap_id(id)
if diff_value:
assert len(value) == len(id)
if self.is_async:
self._assert_id(id)
for i, j in enumerate(id):
if diff_value:
self.workers[j].set_env_attr(key, value[i])
else:
self.workers[j].set_env_attr(key, value)
def _wrap_id(
self,
id: Optional[Union[int, List[int], np.ndarray]] = None,
) -> Union[List[int], np.ndarray]:
if id is None:
return list(range(self.env_num))
return [id] if np.isscalar(id) else id # type: ignore
def _assert_id(self, id: Union[List[int], np.ndarray]) -> None:
for i in id:
assert (
i not in self.waiting_id
), f"Cannot interact with environment {i} which is stepping now."
assert (
i in self.ready_id
), f"Can only interact with ready environments {self.ready_id}."
# TODO: compatible issue with reset -> (obs, info)
def reset(
self, id: Optional[Union[int, List[int], np.ndarray]] = None
) -> np.ndarray:
"""Reset the state of some envs and return initial observations.
If id is None, reset the state of all the environments and
return initial observations, otherwise reset the specific
environments with the given id, either an int or a list.
"""
self._assert_is_not_closed()
id = self._wrap_id(id)
if self.is_async:
self._assert_id(id)
# send(None) == reset() in worker
for i in id:
self.workers[i].send(None)
obs_list = [self.workers[i].recv() for i in id]
try:
obs = np.stack(obs_list)
except ValueError: # different len(obs)
obs = np.array(obs_list, dtype=object)
return obs
def step(
self,
action: np.ndarray,
id: Optional[Union[int, List[int], np.ndarray]] = None,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Run one timestep of some environments' dynamics.
If id is None, run one timestep of all the environments’ dynamics;
otherwise run one timestep for some environments with given id, either
an int or a list. When the end of episode is reached, you are
responsible for calling reset(id) to reset this environment’s state.
Accept a batch of action and return a tuple (batch_obs, batch_rew,
batch_done, batch_info) in numpy format.
:param numpy.ndarray action: a batch of action provided by the agent.
:return: A tuple including four items:
* ``obs`` a numpy.ndarray, the agent's observation of current environments
* ``rew`` a numpy.ndarray, the amount of rewards returned after \
previous actions
* ``done`` a numpy.ndarray, whether these episodes have ended, in \
which case further step() calls will return undefined results
* ``info`` a numpy.ndarray, contains auxiliary diagnostic \
information (helpful for debugging, and sometimes learning)
For the async simulation:
Provide the given action to the environments. The action sequence
should correspond to the ``id`` argument, and the ``id`` argument
should be a subset of the ``env_id`` in the last returned ``info``
(initially they are env_ids of all the environments). If action is
None, fetch unfinished step() calls instead.
"""
self._assert_is_not_closed()
id = self._wrap_id(id)
if not self.is_async:
assert len(action) == len(id)
for i, j in enumerate(id):
self.workers[j].send(action[i])
result = []
for j in id:
obs, rew, done, info = self.workers[j].recv()
info["env_id"] = j
result.append((obs, rew, done, info))
else:
if action is not None:
self._assert_id(id)
assert len(action) == len(id)
for act, env_id in zip(action, id):
self.workers[env_id].send(act)
self.waiting_conn.append(self.workers[env_id])
self.waiting_id.append(env_id)
self.ready_id = [x for x in self.ready_id if x not in id]
ready_conns: List[EnvWorker] = []
while not ready_conns:
ready_conns = self.worker_class.wait(
self.waiting_conn, self.wait_num, self.timeout
)
result = []
for conn in ready_conns:
waiting_index = self.waiting_conn.index(conn)
self.waiting_conn.pop(waiting_index)
env_id = self.waiting_id.pop(waiting_index)
obs, rew, done, info = conn.recv()
info["env_id"] = env_id
result.append((obs, rew, done, info))
self.ready_id.append(env_id)
obs_list, rew_list, done_list, info_list = zip(*result)
try:
obs_stack = np.stack(obs_list)
except ValueError: # different len(obs)
obs_stack = np.array(obs_list, dtype=object)
rew_stack, done_stack, info_stack = map(
np.stack, [rew_list, done_list, info_list]
)
return obs_stack, rew_stack, done_stack, info_stack
def seed(
self,
seed: Optional[Union[int, List[int]]] = None,
) -> List[Optional[List[int]]]:
"""Set the seed for all environments.
Accept ``None``, an int (which will extend ``i`` to
``[i, i + 1, i + 2, ...]``) or a list.
:return: The list of seeds used in this env's random number generators.
The first value in the list should be the "main" seed, or the value
which a reproducer pass to "seed".
"""
self._assert_is_not_closed()
seed_list: Union[List[None], List[int]]
if seed is None:
seed_list = [seed] * self.env_num
elif isinstance(seed, int):
seed_list = [seed + i for i in range(self.env_num)]
else:
seed_list = seed
return [w.seed(s) for w, s in zip(self.workers, seed_list)]
def render(self, **kwargs: Any) -> List[Any]:
"""Render all of the environments."""
self._assert_is_not_closed()
if self.is_async and len(self.waiting_id) > 0:
raise RuntimeError(
f"Environments {self.waiting_id} are still stepping, cannot "
"render them now."
)
return [w.render(**kwargs) for w in self.workers]
def close(self) -> None:
"""Close all of the environments.
This function will be called only once (if not, it will be
called during garbage collected). This way, ``close`` of all
workers can be assured.
"""
self._assert_is_not_closed()
for w in self.workers:
w.close()
self.is_closed = True
class SubprocVectorEnv(BaseVectorEnv):
"""Vectorized environment wrapper based on subprocess.
.. seealso::
Please refer to :class:`~tianshou.env.BaseVectorEnv` for other APIs' usage.
"""
def __init__(self, env_fns: List[Callable[[], gym.Env]], **kwargs: Any) -> None:
def worker_fn(fn: Callable[[], gym.Env]) -> SubprocEnvWorker:
return SubprocEnvWorker(fn, share_memory=False)
super().__init__(env_fns, worker_fn, **kwargs)
class ShmemVectorEnv(BaseVectorEnv):
"""Optimized SubprocVectorEnv with shared buffers to exchange observations.
ShmemVectorEnv has exactly the same API as SubprocVectorEnv.
.. seealso::
Please refer to :class:`~tianshou.env.BaseVectorEnv` for other APIs' usage.
"""
def __init__(self, env_fns: List[Callable[[], gym.Env]], **kwargs: Any) -> None:
def worker_fn(fn: Callable[[], gym.Env]) -> SubprocEnvWorker:
return SubprocEnvWorker(fn, share_memory=True)
super().__init__(env_fns, worker_fn, **kwargs)
class RayVectorEnv(BaseVectorEnv):
"""Vectorized environment wrapper based on ray.
This is a choice to run distributed environments in a cluster.
.. seealso::
Please refer to :class:`~tianshou.env.BaseVectorEnv` for other APIs' usage.
"""
def __init__(self, env_fns: List[Callable[[], gym.Env]], **kwargs: Any) -> None:
try:
except ImportError as exception:
raise ImportError(
"Please install ray to support RayVectorEnv: pip install ray"
) from exception
if not ray.is_initialized():
ray.init() | super().__init__(env_fns, RayEnvWorker, **kwargs) | 1 | 2023-10-17 19:08:14+00:00 | 8k |
bytedance/ColTrack | models/dino/deformable_transformer.py | [
{
"identifier": "inverse_sigmoid",
"path": "util/misc.py",
"snippet": "def inverse_sigmoid(x, eps=1e-3):\n if x.shape[-1] == 4:\n x = torch.cat(((x[..., :2] + 1) / 3, x[..., 2:] / 2), dim=-1)\n elif x.shape[-1] == 2:\n x = (x + 1) / 3\n else:\n raise ValueError\n x = x.clamp(min=0, max=1)\n x1 = x.clamp(min=eps)\n x2 = (1 - x).clamp(min=eps)\n return torch.log(x1/x2)"
},
{
"identifier": "scale_sigmoid",
"path": "util/misc.py",
"snippet": "def scale_sigmoid(x, eps=1e-3):\n if x.shape[-1] == 4:\n x = torch.cat((3 * (x[..., :2]) - 1, x[..., 2:] * 2), dim=-1)\n elif x.shape[-1] == 2:\n x = 3 * x - 1\n else:\n raise ValueError\n return x"
},
{
"identifier": "gen_encoder_output_proposals",
"path": "models/dino/utils.py",
"snippet": "def gen_encoder_output_proposals(memory:Tensor, memory_padding_mask:Tensor, spatial_shapes:Tensor, learnedwh=None):\n \"\"\"\n Input:\n - memory: bs, \\sum{hw}, d_model\n - memory_padding_mask: bs, \\sum{hw}\n - spatial_shapes: nlevel, 2\n - learnedwh: 2\n Output:\n - output_memory: bs, \\sum{hw}, d_model\n - output_proposals: bs, \\sum{hw}, 4\n \"\"\"\n N_, S_, C_ = memory.shape\n base_scale = 4.0\n proposals = []\n _cur = 0\n for lvl, (H_, W_) in enumerate(spatial_shapes):\n mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H_ * W_)].view(N_, H_, W_, 1)\n valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)\n valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)\n\n # import ipdb; ipdb.set_trace()\n\n grid_y, grid_x = torch.meshgrid(torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device),\n torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device))\n grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) # H_, W_, 2\n\n scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2)\n grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale\n\n if learnedwh is not None:\n # import ipdb; ipdb.set_trace()\n wh = torch.ones_like(grid) * learnedwh.sigmoid() * (2.0 ** lvl)\n raise NotImplementedError\n else:\n wh = torch.ones_like(grid) * 0.05 * (2.0 ** lvl)\n\n # scale = torch.cat([W_[None].unsqueeze(-1), H_[None].unsqueeze(-1)], 1).view(1, 1, 1, 2).repeat(N_, 1, 1, 1)\n # grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale\n # wh = torch.ones_like(grid) / scale\n proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)\n proposals.append(proposal)\n _cur += (H_ * W_)\n # import ipdb; ipdb.set_trace()\n output_proposals = torch.cat(proposals, 1)\n output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)\n output_proposals = torch.cat(((output_proposals[..., :2] + 1) / 3, output_proposals[..., 2:] / 2), dim=-1)\n output_proposals = torch.log(output_proposals / (1 - output_proposals)) # unsigmoid\n output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))\n output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf'))\n\n output_memory = memory\n output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))\n output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))\n\n # output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))\n # output_memory = output_memory.masked_fill(~output_proposals_valid, float('inf'))\n\n return output_memory, output_proposals"
},
{
"identifier": "MLP",
"path": "models/dino/utils.py",
"snippet": "class MLP(nn.Module):\n \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x"
},
{
"identifier": "_get_activation_fn",
"path": "models/dino/utils.py",
"snippet": "def _get_activation_fn(activation, d_model=256, batch_dim=0):\n \"\"\"Return an activation function given a string\"\"\"\n if activation == \"relu\":\n return F.relu\n if activation == \"gelu\":\n return F.gelu\n if activation == \"glu\":\n return F.glu\n if activation == \"prelu\":\n return nn.PReLU()\n if activation == \"selu\":\n return F.selu\n\n raise RuntimeError(F\"activation should be relu/gelu, not {activation}.\")"
},
{
"identifier": "gen_sineembed_for_position",
"path": "models/dino/utils.py",
"snippet": "def gen_sineembed_for_position(pos_tensor):\n # n_query, bs, _ = pos_tensor.size()\n # sineembed_tensor = torch.zeros(n_query, bs, 256)\n scale = 2 * math.pi\n dim_t = torch.arange(128, dtype=torch.float32, device=pos_tensor.device)\n dim_t = 10000 ** (2 * (dim_t // 2) / 128)\n x_embed = pos_tensor[:, :, 0] * scale\n y_embed = pos_tensor[:, :, 1] * scale\n pos_x = x_embed[:, :, None] / dim_t\n pos_y = y_embed[:, :, None] / dim_t\n pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2)\n pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2)\n if pos_tensor.size(-1) == 2:\n pos = torch.cat((pos_y, pos_x), dim=2)\n elif pos_tensor.size(-1) == 4:\n w_embed = pos_tensor[:, :, 2] * scale\n pos_w = w_embed[:, :, None] / dim_t\n pos_w = torch.stack((pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), dim=3).flatten(2)\n\n h_embed = pos_tensor[:, :, 3] * scale\n pos_h = h_embed[:, :, None] / dim_t\n pos_h = torch.stack((pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3).flatten(2)\n\n pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2)\n else:\n raise ValueError(\"Unknown pos_tensor shape(-1):{}\".format(pos_tensor.size(-1)))\n return pos"
},
{
"identifier": "MSDeformAttn",
"path": "models/dino/ops/modules/ms_deform_attn.py",
"snippet": "class MSDeformAttn(nn.Module):\n def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):\n \"\"\"\n Multi-Scale Deformable Attention Module\n :param d_model hidden dimension\n :param n_levels number of feature levels\n :param n_heads number of attention heads\n :param n_points number of sampling points per attention head per feature level\n \"\"\"\n super().__init__()\n if d_model % n_heads != 0:\n raise ValueError('d_model must be divisible by n_heads, but got {} and {}'.format(d_model, n_heads))\n _d_per_head = d_model // n_heads\n # you'd better set _d_per_head to a power of 2 which is more efficient in our CUDA implementation\n if not _is_power_of_2(_d_per_head):\n warnings.warn(\"You'd better set d_model in MSDeformAttn to make the dimension of each attention head a power of 2 \"\n \"which is more efficient in our CUDA implementation.\")\n\n self.im2col_step = 64\n\n self.d_model = d_model\n self.n_levels = n_levels\n self.n_heads = n_heads\n self.n_points = n_points\n\n self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)\n self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)\n self.value_proj = nn.Linear(d_model, d_model)\n self.output_proj = nn.Linear(d_model, d_model)\n\n self._reset_parameters()\n\n def _reset_parameters(self):\n constant_(self.sampling_offsets.weight.data, 0.)\n thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)\n grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)\n grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.n_heads, 1, 1, 2).repeat(1, self.n_levels, self.n_points, 1)\n for i in range(self.n_points):\n grid_init[:, :, i, :] *= i + 1\n with torch.no_grad():\n self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))\n constant_(self.attention_weights.weight.data, 0.)\n constant_(self.attention_weights.bias.data, 0.)\n xavier_uniform_(self.value_proj.weight.data)\n constant_(self.value_proj.bias.data, 0.)\n xavier_uniform_(self.output_proj.weight.data)\n constant_(self.output_proj.bias.data, 0.)\n\n def forward(self, query, reference_points, input_flatten, input_spatial_shapes, input_level_start_index, input_padding_mask=None):\n \"\"\"\n :param query (N, Length_{query}, C)\n :param reference_points (N, Length_{query}, n_levels, 2), range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area\n or (N, Length_{query}, n_levels, 4), add additional (w, h) to form reference boxes\n :param input_flatten (N, \\sum_{l=0}^{L-1} H_l \\cdot W_l, C)\n :param input_spatial_shapes (n_levels, 2), [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]\n :param input_level_start_index (n_levels, ), [0, H_0*W_0, H_0*W_0+H_1*W_1, H_0*W_0+H_1*W_1+H_2*W_2, ..., H_0*W_0+H_1*W_1+...+H_{L-1}*W_{L-1}]\n :param input_padding_mask (N, \\sum_{l=0}^{L-1} H_l \\cdot W_l), True for padding elements, False for non-padding elements\n\n :return output (N, Length_{query}, C)\n \"\"\"\n N, Len_q, _ = query.shape\n N, Len_in, _ = input_flatten.shape\n assert (input_spatial_shapes[:, 0] * input_spatial_shapes[:, 1]).sum() == Len_in\n\n value = self.value_proj(input_flatten)\n if input_padding_mask is not None:\n value = value.masked_fill(input_padding_mask[..., None], float(0))\n value = value.view(N, Len_in, self.n_heads, self.d_model // self.n_heads)\n sampling_offsets = self.sampling_offsets(query).view(N, Len_q, self.n_heads, self.n_levels, self.n_points, 2)\n attention_weights = self.attention_weights(query).view(N, Len_q, self.n_heads, self.n_levels * self.n_points)\n attention_weights = F.softmax(attention_weights, -1).view(N, Len_q, self.n_heads, self.n_levels, self.n_points)\n # N, Len_q, n_heads, n_levels, n_points, 2\n if reference_points.shape[-1] == 2:\n offset_normalizer = torch.stack([input_spatial_shapes[..., 1], input_spatial_shapes[..., 0]], -1)\n sampling_locations = reference_points[:, :, None, :, None, :] \\\n + sampling_offsets / offset_normalizer[None, None, None, :, None, :]\n elif reference_points.shape[-1] == 4:\n sampling_locations = reference_points[:, :, None, :, None, :2] \\\n + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5\n else:\n raise ValueError(\n 'Last dim of reference_points must be 2 or 4, but get {} instead.'.format(reference_points.shape[-1]))\n\n # for amp\n if value.dtype == torch.float16:\n # for mixed precision\n output = MSDeformAttnFunction.apply(\n value.to(torch.float32), input_spatial_shapes, input_level_start_index, sampling_locations.to(torch.float32), attention_weights, self.im2col_step)\n output = output.to(torch.float16)\n output = self.output_proj(output)\n return output\n\n\n output = MSDeformAttnFunction.apply(\n value, input_spatial_shapes, input_level_start_index, sampling_locations, attention_weights, self.im2col_step)\n output = self.output_proj(output)\n return output"
}
] | import math, random
import copy
import torch
from typing import Optional
from util.misc import inverse_sigmoid, scale_sigmoid
from torch import nn, Tensor
from .utils import gen_encoder_output_proposals, MLP,_get_activation_fn, gen_sineembed_for_position
from .ops.modules import MSDeformAttn
from .utils import RandomBoxPerturber | 5,652 | raise NotImplementedError
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
self.encoder = TransformerEncoder(
encoder_layer, num_encoder_layers,
encoder_norm, d_model=d_model,
num_queries=num_queries,
deformable_encoder=deformable_encoder,
enc_layer_share=enc_layer_share,
two_stage_type=two_stage_type
)
# choose decoder layer type
if deformable_decoder:
decoder_layer = DeformableTransformerDecoderLayer(d_model, dim_feedforward,
dropout, activation,
num_feature_levels, nhead, dec_n_points, use_deformable_box_attn=use_deformable_box_attn, box_attn_type=box_attn_type,
key_aware_type=key_aware_type,
decoder_sa_type=decoder_sa_type,
module_seq=module_seq)
else:
raise NotImplementedError
decoder_norm = nn.LayerNorm(d_model)
self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,
return_intermediate=return_intermediate_dec,
d_model=d_model, query_dim=query_dim,
modulate_hw_attn=modulate_hw_attn,
num_feature_levels=num_feature_levels,
deformable_decoder=deformable_decoder,
decoder_query_perturber=decoder_query_perturber,
dec_layer_number=dec_layer_number, rm_dec_query_scale=rm_dec_query_scale,
dec_layer_share=dec_layer_share,
use_detached_boxes_dec_out=use_detached_boxes_dec_out
)
self.d_model = d_model
self.nhead = nhead
self.dec_layers = num_decoder_layers
self.num_queries = num_queries # useful for single stage model only
self.num_patterns = num_patterns
if not isinstance(num_patterns, int):
Warning("num_patterns should be int but {}".format(type(num_patterns)))
self.num_patterns = 0
if num_feature_levels > 1:
if self.num_encoder_layers > 0:
self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))
else:
self.level_embed = None
self.learnable_tgt_init = learnable_tgt_init
assert learnable_tgt_init, "why not learnable_tgt_init"
self.embed_init_tgt = embed_init_tgt
if (two_stage_type != 'no' and embed_init_tgt) or (two_stage_type == 'no'):
self.tgt_embed = nn.Embedding(self.num_queries, d_model)
nn.init.normal_(self.tgt_embed.weight.data)
else:
self.tgt_embed = None
# for two stage
self.two_stage_type = two_stage_type
self.two_stage_pat_embed = two_stage_pat_embed
self.two_stage_add_query_num = two_stage_add_query_num
self.two_stage_learn_wh = two_stage_learn_wh
assert two_stage_type in ['no', 'standard'], "unknown param {} of two_stage_type".format(two_stage_type)
if two_stage_type =='standard':
# anchor selection at the output of encoder
self.enc_output = nn.Linear(d_model, d_model)
self.enc_output_norm = nn.LayerNorm(d_model)
if two_stage_pat_embed > 0:
self.pat_embed_for_2stage = nn.Parameter(torch.Tensor(two_stage_pat_embed, d_model))
nn.init.normal_(self.pat_embed_for_2stage)
if two_stage_add_query_num > 0:
self.tgt_embed = nn.Embedding(self.two_stage_add_query_num, d_model)
if two_stage_learn_wh:
# import ipdb; ipdb.set_trace()
self.two_stage_wh_embedding = nn.Embedding(1, 2)
else:
self.two_stage_wh_embedding = None
if two_stage_type == 'no':
self.init_ref_points(num_queries) # init self.refpoint_embed
self.enc_out_class_embed = None
self.enc_out_bbox_embed = None
# evolution of anchors
self.dec_layer_number = dec_layer_number
if dec_layer_number is not None:
if self.two_stage_type != 'no' or num_patterns == 0:
assert dec_layer_number[0] == num_queries, f"dec_layer_number[0]({dec_layer_number[0]}) != num_queries({num_queries})"
else:
assert dec_layer_number[0] == num_queries * num_patterns, f"dec_layer_number[0]({dec_layer_number[0]}) != num_queries({num_queries}) * num_patterns({num_patterns})"
self._reset_parameters()
self.rm_self_attn_layers = rm_self_attn_layers
if rm_self_attn_layers is not None:
# assert len(rm_self_attn_layers) == num_decoder_layers
print("Removing the self-attn in {} decoder layers".format(rm_self_attn_layers))
for lid, dec_layer in enumerate(self.decoder.layers):
if lid in rm_self_attn_layers:
dec_layer.rm_self_attn_modules()
self.rm_detach = rm_detach
if self.rm_detach:
assert isinstance(rm_detach, list)
assert any([i in ['enc_ref', 'enc_tgt', 'dec'] for i in rm_detach])
self.decoder.rm_detach = rm_detach
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
for m in self.modules():
| # ------------------------------------------------------------------------
# DINO
# Copyright (c) 2022 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Conditional DETR Transformer class.
# Copyright (c) 2021 Microsoft. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# ------------------------------------------------------------------------
class DeformableTransformer(nn.Module):
def __init__(self, d_model=256, nhead=8,
num_queries=300,
num_encoder_layers=6,
num_unicoder_layers=0,
num_decoder_layers=6,
dim_feedforward=2048, dropout=0.0,
activation="relu", normalize_before=False,
return_intermediate_dec=False, query_dim=4,
num_patterns=0,
modulate_hw_attn=False,
# for deformable encoder
deformable_encoder=False,
deformable_decoder=False,
num_feature_levels=1,
enc_n_points=4,
dec_n_points=4,
use_deformable_box_attn=False,
box_attn_type='roi_align',
# init query
learnable_tgt_init=False,
decoder_query_perturber=None,
add_channel_attention=False,
add_pos_value=False,
random_refpoints_xy=False,
# two stage
two_stage_type='no', # ['no', 'standard', 'early', 'combine', 'enceachlayer', 'enclayer1']
two_stage_pat_embed=0,
two_stage_add_query_num=0,
two_stage_learn_wh=False,
two_stage_keep_all_tokens=False,
# evo of #anchors
dec_layer_number=None,
rm_enc_query_scale=True,
rm_dec_query_scale=True,
rm_self_attn_layers=None,
key_aware_type=None,
# layer share
layer_share_type=None,
# for detach
rm_detach=None,
decoder_sa_type='ca',
module_seq=['sa', 'ca', 'ffn'],
# for dn
embed_init_tgt=False,
use_detached_boxes_dec_out=False,
):
super().__init__()
self.num_feature_levels = num_feature_levels
self.num_encoder_layers = num_encoder_layers
self.num_unicoder_layers = num_unicoder_layers
self.num_decoder_layers = num_decoder_layers
self.deformable_encoder = deformable_encoder
self.deformable_decoder = deformable_decoder
self.two_stage_keep_all_tokens = two_stage_keep_all_tokens
self.num_queries = num_queries
self.random_refpoints_xy = random_refpoints_xy
self.use_detached_boxes_dec_out = use_detached_boxes_dec_out
assert query_dim == 4
if num_feature_levels > 1:
assert deformable_encoder, "only support deformable_encoder for num_feature_levels > 1"
if use_deformable_box_attn:
assert deformable_encoder or deformable_encoder
assert layer_share_type in [None, 'encoder', 'decoder', 'both']
if layer_share_type in ['encoder', 'both']:
enc_layer_share = True
else:
enc_layer_share = False
if layer_share_type in ['decoder', 'both']:
dec_layer_share = True
else:
dec_layer_share = False
assert layer_share_type is None
self.decoder_sa_type = decoder_sa_type
assert decoder_sa_type in ['sa', 'ca_label', 'ca_content']
# choose encoder layer type
if deformable_encoder:
encoder_layer = DeformableTransformerEncoderLayer(d_model, dim_feedforward,
dropout, activation,
num_feature_levels, nhead, enc_n_points, add_channel_attention=add_channel_attention, use_deformable_box_attn=use_deformable_box_attn, box_attn_type=box_attn_type)
else:
raise NotImplementedError
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
self.encoder = TransformerEncoder(
encoder_layer, num_encoder_layers,
encoder_norm, d_model=d_model,
num_queries=num_queries,
deformable_encoder=deformable_encoder,
enc_layer_share=enc_layer_share,
two_stage_type=two_stage_type
)
# choose decoder layer type
if deformable_decoder:
decoder_layer = DeformableTransformerDecoderLayer(d_model, dim_feedforward,
dropout, activation,
num_feature_levels, nhead, dec_n_points, use_deformable_box_attn=use_deformable_box_attn, box_attn_type=box_attn_type,
key_aware_type=key_aware_type,
decoder_sa_type=decoder_sa_type,
module_seq=module_seq)
else:
raise NotImplementedError
decoder_norm = nn.LayerNorm(d_model)
self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,
return_intermediate=return_intermediate_dec,
d_model=d_model, query_dim=query_dim,
modulate_hw_attn=modulate_hw_attn,
num_feature_levels=num_feature_levels,
deformable_decoder=deformable_decoder,
decoder_query_perturber=decoder_query_perturber,
dec_layer_number=dec_layer_number, rm_dec_query_scale=rm_dec_query_scale,
dec_layer_share=dec_layer_share,
use_detached_boxes_dec_out=use_detached_boxes_dec_out
)
self.d_model = d_model
self.nhead = nhead
self.dec_layers = num_decoder_layers
self.num_queries = num_queries # useful for single stage model only
self.num_patterns = num_patterns
if not isinstance(num_patterns, int):
Warning("num_patterns should be int but {}".format(type(num_patterns)))
self.num_patterns = 0
if num_feature_levels > 1:
if self.num_encoder_layers > 0:
self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))
else:
self.level_embed = None
self.learnable_tgt_init = learnable_tgt_init
assert learnable_tgt_init, "why not learnable_tgt_init"
self.embed_init_tgt = embed_init_tgt
if (two_stage_type != 'no' and embed_init_tgt) or (two_stage_type == 'no'):
self.tgt_embed = nn.Embedding(self.num_queries, d_model)
nn.init.normal_(self.tgt_embed.weight.data)
else:
self.tgt_embed = None
# for two stage
self.two_stage_type = two_stage_type
self.two_stage_pat_embed = two_stage_pat_embed
self.two_stage_add_query_num = two_stage_add_query_num
self.two_stage_learn_wh = two_stage_learn_wh
assert two_stage_type in ['no', 'standard'], "unknown param {} of two_stage_type".format(two_stage_type)
if two_stage_type =='standard':
# anchor selection at the output of encoder
self.enc_output = nn.Linear(d_model, d_model)
self.enc_output_norm = nn.LayerNorm(d_model)
if two_stage_pat_embed > 0:
self.pat_embed_for_2stage = nn.Parameter(torch.Tensor(two_stage_pat_embed, d_model))
nn.init.normal_(self.pat_embed_for_2stage)
if two_stage_add_query_num > 0:
self.tgt_embed = nn.Embedding(self.two_stage_add_query_num, d_model)
if two_stage_learn_wh:
# import ipdb; ipdb.set_trace()
self.two_stage_wh_embedding = nn.Embedding(1, 2)
else:
self.two_stage_wh_embedding = None
if two_stage_type == 'no':
self.init_ref_points(num_queries) # init self.refpoint_embed
self.enc_out_class_embed = None
self.enc_out_bbox_embed = None
# evolution of anchors
self.dec_layer_number = dec_layer_number
if dec_layer_number is not None:
if self.two_stage_type != 'no' or num_patterns == 0:
assert dec_layer_number[0] == num_queries, f"dec_layer_number[0]({dec_layer_number[0]}) != num_queries({num_queries})"
else:
assert dec_layer_number[0] == num_queries * num_patterns, f"dec_layer_number[0]({dec_layer_number[0]}) != num_queries({num_queries}) * num_patterns({num_patterns})"
self._reset_parameters()
self.rm_self_attn_layers = rm_self_attn_layers
if rm_self_attn_layers is not None:
# assert len(rm_self_attn_layers) == num_decoder_layers
print("Removing the self-attn in {} decoder layers".format(rm_self_attn_layers))
for lid, dec_layer in enumerate(self.decoder.layers):
if lid in rm_self_attn_layers:
dec_layer.rm_self_attn_modules()
self.rm_detach = rm_detach
if self.rm_detach:
assert isinstance(rm_detach, list)
assert any([i in ['enc_ref', 'enc_tgt', 'dec'] for i in rm_detach])
self.decoder.rm_detach = rm_detach
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
for m in self.modules(): | if isinstance(m, MSDeformAttn): | 6 | 2023-10-16 02:18:33+00:00 | 8k |
alm0ra/mockafka-py | tests/test_consumer.py | [
{
"identifier": "FakeAdminClientImpl",
"path": "mockafka/admin_client.py",
"snippet": "class FakeAdminClientImpl:\n \"\"\"\n Mock implementation of the Confluent Kafka AdminClient for testing purposes.\n\n Attributes:\n - kafka (KafkaStore): The in-memory storage for simulating Kafka behavior.\n - clean (bool): Flag indicating whether to start with a clean slate.\n\n Methods:\n - create_partitions(partitions: List[NewPartitions]): Create partitions in the in-memory Kafka store.\n - create_partition(partition: NewPartitions): Create a single partition in the in-memory Kafka store.\n - create_topics(topics: List[NewTopic]): Create topics in the in-memory Kafka store.\n - create_topic(topic: NewTopic): Create a single topic in the in-memory Kafka store.\n - delete_topics(topics, future=None, request_timeout=None, operation_timeout=None): Delete topics from the in-memory Kafka store.\n - delete_topic(topic: NewTopic): Delete a single topic from the in-memory Kafka store.\n - describe_acls(acl_binding_filter, future, request_timeout=None): Describe ACLs (unsupported in mockafka).\n - describe_configs(resources, future, request_timeout=None, broker=None): Describe configurations (unsupported in mockafka).\n - delete_acls(acl_binding_filters, future, request_timeout=None): Delete ACLs (unsupported in mockafka).\n - alter_configs(*args, **kwargs): Alter configurations (unsupported in mockafka).\n - create_acls(*args, **kwargs): Create ACLs (unsupported in mockafka).\n - list_groups(group=None, *args, **kwargs): List consumer groups (unsupported in mockafka).\n - list_topics(topic=None, *args, **kwargs): List topics (returns ClusterMetadata).\n - poll(timeout=None): Poll for events (unsupported in mockafka).\n - __len__(*args, **kwargs): Get the length of the Kafka store (not implemented).\n \"\"\"\n\n def __init__(self, clean: bool = False, *args, **kwargs):\n \"\"\"\n Initialize the FakeAdminClientImpl.\n\n Parameters:\n - clean (bool): Flag indicating whether to start with a clean slate.\n \"\"\"\n self.kafka = KafkaStore(clean=clean)\n\n def create_partitions(self, partitions: list[NewPartitions]):\n \"\"\"\n Create partitions in the in-memory Kafka store.\n\n Parameters:\n - partitions (List[NewPartitions]): List of partition objects to be created.\n \"\"\"\n for partition in partitions:\n self.create_partition(partition)\n\n def create_partition(self, partition: NewPartitions):\n \"\"\"\n Create a single partition in the in-memory Kafka store.\n\n Parameters:\n - partition (NewPartitions): The partition object to be created.\n \"\"\"\n self.kafka.create_partition(topic=partition.topic, partitions=partition.new_total_count)\n\n def create_topics(self, topics: list[NewTopic]):\n \"\"\"\n Create topics in the in-memory Kafka store.\n\n Parameters:\n - topics (List[NewTopic]): List of topic objects to be created.\n \"\"\"\n for topic in topics:\n self.create_topic(topic=topic)\n\n def create_topic(self, topic: NewTopic):\n \"\"\"\n Create a single topic in the in-memory Kafka store.\n\n Parameters:\n - topic (NewTopic): The topic object to be created.\n \"\"\"\n self.kafka.create_topic(topic.topic)\n self.create_partitions([NewPartitions(topic.topic, topic.num_partitions)])\n\n def delete_topics(self, topics, future=None, request_timeout=None, operation_timeout=None):\n \"\"\"\n Delete topics from the in-memory Kafka store.\n\n Parameters:\n - topics: Topics to be deleted.\n - future: Unused parameter (for compatibility).\n - request_timeout: Unused parameter (for compatibility).\n - operation_timeout: Unused parameter (for compatibility).\n \"\"\"\n for topic in topics:\n self.delete_topic(topic=topic)\n\n def delete_topic(self, topic: NewTopic):\n \"\"\"\n Delete a single topic from the in-memory Kafka store.\n\n Parameters:\n - topic (NewTopic): The topic object to be deleted.\n \"\"\"\n self.kafka.remove_topic(topic=topic.topic)\n\n def describe_acls(self, acl_binding_filter, future, request_timeout=None):\n \"\"\"\n Describe ACLs (unsupported in mockafka).\n\n Parameters:\n - acl_binding_filter: Unused parameter (unsupported).\n - future: Unused parameter (unsupported).\n - request_timeout: Unused parameter (unsupported).\n \"\"\"\n pass\n\n def describe_configs(self, resources, future, request_timeout=None, broker=None):\n \"\"\"\n Describe configurations (unsupported in mockafka).\n\n Parameters:\n - resources: Unused parameter (unsupported).\n - future: Unused parameter (unsupported).\n - request_timeout: Unused parameter (unsupported).\n - broker: Unused parameter (unsupported).\n \"\"\"\n pass\n\n def delete_acls(self, acl_binding_filters, future, request_timeout=None):\n \"\"\"\n Delete ACLs (unsupported in mockafka).\n\n Parameters:\n - acl_binding_filters: Unused parameter (unsupported).\n - future: Unused parameter (unsupported).\n - request_timeout: Unused parameter (unsupported).\n \"\"\"\n pass\n\n def alter_configs(self, *args, **kwargs):\n \"\"\"\n Alter configurations (unsupported in mockafka).\n\n Parameters:\n - args: Unused parameter (unsupported).\n - kwargs: Unused parameter (unsupported).\n \"\"\"\n pass\n\n def create_acls(self, *args, **kwargs):\n \"\"\"\n Create ACLs (unsupported in mockafka).\n\n Parameters:\n - args: Unused parameter (unsupported).\n - kwargs: Unused parameter (unsupported).\n \"\"\"\n pass\n\n def list_groups(self, group=None, *args, **kwargs):\n \"\"\"\n List consumer groups (unsupported in mockafka).\n\n Parameters:\n - group: Unused parameter (unsupported).\n - args: Unused parameter (unsupported).\n - kwargs: Unused parameter (unsupported).\n \"\"\"\n pass\n\n def list_topics(self, topic=None, *args, **kwargs):\n \"\"\"\n List topics (returns ClusterMetadata).\n\n Parameters:\n - topic: Unused parameter (for compatibility).\n - args: Unused parameter (for compatibility).\n - kwargs: Unused parameter (for compatibility).\n\n Returns:\n - ClusterMetadata: Metadata of the listed topics.\n \"\"\"\n return ClusterMetadata(topic)\n\n def poll(self, timeout=None):\n \"\"\"\n Poll for events (unsupported in mockafka).\n\n Parameters:\n - timeout: Unused parameter (unsupported).\n \"\"\"\n pass\n\n def __len__(self, *args, **kwargs):\n \"\"\"\n Get the length of the Kafka store (not implemented).\n\n Parameters:\n - args: Unused parameters (not implemented).\n - kwargs: Unused parameters (not implemented).\n \"\"\"\n pass"
},
{
"identifier": "FakeConsumer",
"path": "mockafka/conumser.py",
"snippet": "class FakeConsumer(object):\n \"\"\"\n Mock implementation of the Confluent Kafka Consumer for testing purposes.\n\n Attributes:\n - kafka (KafkaStore): The in-memory storage for simulating Kafka behavior.\n - consumer_store (dict): Dictionary to store consumer offsets for each topic-partition.\n - subscribed_topic (list): List of topics subscribed by the consumer.\n\n Methods:\n - consume(num_messages=1, *args, **kwargs): Consume messages from subscribed topics.\n - close(*args, **kwargs): Close the consumer and reset state.\n - commit(message: Message = None, *args, **kwargs): Commit offsets for consumed messages.\n - list_topics(topic=None, *args, **kwargs): List topics (returns ClusterMetadata).\n - poll(timeout=None): Poll for messages from subscribed topics.\n - _get_key(topic, partition) -> str: Generate a unique key for a topic-partition pair.\n - subscribe(topics, on_assign=None, *args, **kwargs): Subscribe to one or more topics.\n - unsubscribe(*args, **kwargs): Unsubscribe from one or more topics.\n - assign(partitions): Assign partitions to the consumer (unsupported in mockafka).\n - unassign(*args, **kwargs): Unassign partitions (unsupported in mockafka).\n - assignment(*args, **kwargs) -> list: Get assigned partitions (unsupported in mockafka).\n - committed(partitions, timeout=None) -> list: Get committed offsets (unsupported in mockafka).\n - get_watermark_offsets(partition, timeout=None, *args, **kwargs) -> tuple: Get watermark offsets (unsupported in mockafka).\n - offsets_for_times(partitions, timeout=None) -> list: Get offsets for given times (unsupported in mockafka).\n - pause(partitions) -> None: Pause consumption from specified partitions (unsupported in mockafka).\n - position(partitions) -> list: Get the current position of the consumer in specified partitions (unsupported in mockafka).\n - resume(partitions) -> None: Resume consumption from specified partitions (unsupported in mockafka).\n - seek(partition) -> None: Seek to a specific offset in a partition (unsupported in mockafka).\n - store_offsets(message=None, *args, **kwargs) -> None: Store offsets for consumed messages (unsupported in mockafka).\n - consumer_group_metadata() -> None: Get consumer group metadata (unsupported in mockafka).\n - incremental_assign(partitions) -> None: Incrementally assign partitions (unsupported in mockafka).\n - incremental_unassign(partitions) -> None: Incrementally unassign partitions (unsupported in mockafka).\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Initialize the FakeConsumer.\n\n Parameters:\n - args: Additional arguments (unused).\n - kwargs: Additional keyword arguments (unused).\n \"\"\"\n self.kafka = KafkaStore()\n self.consumer_store = {}\n self.subscribed_topic: list = []\n\n def consume(self, num_messages=1, *args, **kwargs):\n \"\"\"\n Consume messages from subscribed topics.\n\n Parameters:\n - num_messages (int): Number of messages to consume.\n - args: Additional arguments (unused).\n - kwargs: Additional keyword arguments (unused).\n\n Returns:\n - Message or None: Consumed message or None if no message is available.\n \"\"\"\n return self.poll()\n\n def close(self, *args, **kwargs):\n \"\"\"\n Close the consumer and reset state.\n\n Parameters:\n - args: Additional arguments (unused).\n - kwargs: Additional keyword arguments (unused).\n \"\"\"\n self.consumer_store = {}\n self.subscribed_topic = []\n\n def commit(self, message: Message = None, *args, **kwargs):\n \"\"\"\n Commit offsets for consumed messages.\n\n Parameters:\n - message (Message): Consumed message (unused).\n - args: Additional arguments (unused).\n - kwargs: Additional keyword arguments (unused).\n \"\"\"\n if message:\n pass # Commit offsets by changing offset of the topic (not implemented yet)\n else:\n for item in self.consumer_store:\n topic, partition = item.split('*')\n if self.kafka.get_partition_first_offset(topic, partition) <= self.consumer_store[item]:\n self.kafka.set_first_offset(topic=topic, partition=partition, value=self.consumer_store[item])\n\n self.consumer_store = {}\n\n def list_topics(self, topic=None, *args, **kwargs):\n \"\"\"\n List topics (returns ClusterMetadata).\n\n Parameters:\n - topic: Topic name (unused).\n - args: Additional arguments (unused).\n - kwargs: Additional keyword arguments (unused).\n\n Returns:\n - ClusterMetadata: Metadata of the listed topics.\n \"\"\"\n return ClusterMetadata(topic=topic)\n\n def poll(self, timeout=None):\n \"\"\"\n Poll for messages from subscribed topics.\n\n Parameters:\n - timeout (float): Poll timeout in seconds.\n\n Returns:\n - Message or None: Consumed message or None if no message is available.\n \"\"\"\n if timeout:\n pass\n # sleep(timeout)\n\n for topic in self.subscribed_topic:\n for partition in self.kafka.partition_list(topic=topic):\n first_offset = self.kafka.get_partition_first_offset(topic=topic, partition=partition)\n next_offset = self.kafka.get_partition_next_offset(topic=topic, partition=partition)\n consumer_amount = self.consumer_store.get(self._get_key(topic, partition))\n if first_offset == next_offset:\n continue\n\n if consumer_amount == next_offset:\n continue\n\n if consumer_amount is not None:\n self.consumer_store[self._get_key(topic, partition)] += 1\n else:\n self.consumer_store[self._get_key(topic, partition)] = first_offset + 1\n\n return self.kafka.get_message(topic=topic, partition=partition, offset=first_offset)\n\n return None\n\n def _get_key(self, topic, partition) -> str:\n \"\"\"\n Generate a unique key for a topic-partition pair.\n\n Parameters:\n - topic: Topic name.\n - partition: Partition number.\n\n Returns:\n - str: Unique key for the topic-partition pair.\n \"\"\"\n return f'{topic}*{partition}'\n\n def subscribe(self, topics, on_assign=None, *args, **kwargs):\n \"\"\"\n Subscribe to one or more topics.\n\n Parameters:\n - topics (list): List of topics to subscribe to.\n - on_assign: Callback function for partition assignments (unused).\n - args: Additional arguments (unused).\n - kwargs: Additional keyword arguments (unused).\n\n Raises:\n - KafkaException: If a subscribed topic does not exist in the Kafka store.\n \"\"\"\n for topic in topics:\n if not self.kafka.is_topic_exist(topic):\n continue\n\n if topic not in self.subscribed_topic:\n self.subscribed_topic.append(topic)\n\n def unsubscribe(self, *args, **kwargs):\n \"\"\"\n Unsubscribe from one or more topics.\n\n Parameters:\n - args: Additional arguments (unused).\n - kwargs: Additional keyword arguments.\n\n Raises:\n - ValueError: If no 'topics' keyword argument is provided.\n \"\"\"\n topics = kwargs.get('topics', [])\n for topic in topics:\n if topic in self.subscribed_topic:\n self.subscribed_topic.remove(topic)\n\n def assign(self, partitions):\n \"\"\"\n Assign partitions to the consumer (unsupported in mockafka).\n\n Parameters:\n - partitions: Partitions to assign (unused).\n \"\"\"\n pass\n\n def unassign(self, *args, **kwargs):\n \"\"\"\n Unassign partitions (unsupported in mockafka).\n\n Parameters:\n - args: Additional arguments (unused).\n - kwargs: Additional keyword arguments (unused).\n \"\"\"\n pass\n\n def assignment(self, *args, **kwargs) -> list:\n \"\"\"\n Get assigned partitions (unsupported in mockafka).\n\n Returns:\n - list: An empty list.\n \"\"\"\n return []\n\n def committed(self, partitions, timeout=None) -> list:\n \"\"\"\n Get committed offsets (unsupported in mockafka).\n\n Parameters:\n - partitions: Partitions to get committed offsets for (unused).\n - timeout: Timeout for the operation (unused).\n\n Returns:\n - list: An empty list.\n \"\"\"\n return []\n\n def get_watermark_offsets(self, partition, timeout=None, *args, **kwargs) -> tuple:\n \"\"\"\n Get watermark offsets (unsupported in mockafka).\n\n Parameters:\n - partition: Partition to get watermark offsets for (unused).\n - timeout: Timeout for the operation (unused).\n - args: Additional arguments (unused).\n - kwargs: Additional keyword arguments (unused).\n\n Returns:\n - tuple: Tuple with watermark offsets (0, 0).\n \"\"\"\n return (0, 0)\n\n def offsets_for_times(self, partitions, timeout=None) -> list:\n \"\"\"\n Get offsets for given times (unsupported in mockafka).\n\n Parameters:\n - partitions: Partitions to get offsets for (unused).\n - timeout: Timeout for the operation (unused).\n\n Returns:\n - list: An empty list.\n \"\"\"\n return []\n\n def pause(self, partitions) -> None:\n \"\"\"\n Pause consumption from specified partitions (unsupported in mockafka).\n\n Parameters:\n - partitions: Partitions to pause consumption from (unused).\n\n Returns:\n - None\n \"\"\"\n return None\n\n def position(self, partitions) -> list:\n \"\"\"\n Get the current position of the consumer in specified partitions (unsupported in mockafka).\n\n Parameters:\n - partitions: Partitions to get position for (unused).\n\n Returns:\n - list: An empty list.\n \"\"\"\n return []\n\n def resume(self, partitions) -> None:\n \"\"\"\n Resume consumption from specified partitions (unsupported in mockafka).\n\n Parameters:\n - partitions: Partitions to resume consumption from (unused).\n\n Returns:\n - None\n \"\"\"\n return None\n\n def seek(self, partition) -> None:\n \"\"\"\n Seek to a specific offset in a partition (unsupported in mockafka).\n\n Parameters:\n - partition: Partition to seek in (unused).\n \"\"\"\n pass\n\n def store_offsets(self, message=None, *args, **kwargs) -> None:\n \"\"\"\n Store offsets for consumed messages (unsupported in mockafka).\n\n Parameters:\n - message: Consumed message (unused).\n - args: Additional arguments (unused).\n - kwargs: Additional keyword arguments (unused).\n\n Returns:\n - None\n \"\"\"\n return None\n\n def consumer_group_metadata(self) -> None:\n \"\"\"\n Get consumer group metadata (unsupported in mockafka).\n\n Returns:\n - None\n \"\"\"\n pass\n\n def incremental_assign(self, partitions) -> None:\n \"\"\"\n Incrementally assign partitions (unsupported in mockafka).\n\n Parameters:\n - partitions: Partitions to incrementally assign (unused).\n\n Returns:\n - None\n \"\"\"\n pass\n\n def incremental_unassign(self, partitions) -> None:\n pass"
},
{
"identifier": "KafkaStore",
"path": "mockafka/kafka_store.py",
"snippet": "class SingletonMeta(type):\nclass KafkaStore(metaclass=SingletonMeta):\n def __call__(cls, *args, **kwargs):\n def __init__(self, clean: bool = False):\n def is_topic_exist(topic: str) -> bool:\n def is_partition_exist_on_topic(cls, topic: str, partition_num: int) -> bool:\n def get_number_of_partition(topic: str) -> int:\n def create_topic(topic: str):\n def create_partition(self, topic: str, partitions: int):\n def remove_topic(self, topic: str):\n def set_first_offset(self, topic: str, partition: int, value: int):\n def _add_next_offset(self, topic: str, partition: int):\n def get_offset_store_key(self, topic: str, partition: int):\n def produce(self, message: Message, topic: str, partition: int):\n def get_message(self, topic: str, partition: int, offset: int) -> Message:\n def get_partition_first_offset(self, topic: str, partition: int) -> int:\n def get_partition_next_offset(self, topic: str, partition: int) -> int:\n def topic_list() -> list[str]:\n def partition_list(topic: str) -> list[int]:\n def get_messages_in_partition(topic: str, partition: int) -> list[Message]:\n def number_of_message_in_topic(self, topic: str) -> int:\n def clear_topic_messages(self, topic: str):\n def clear_partition_messages(topic: str, partition: int):\n def reset_offset(self, topic: str, strategy: str = 'latest'):\n def fresh():\n FIRST_OFFSET = 'first_offset'\n NEXT_OFFSET = 'next_offset'"
},
{
"identifier": "FakeProducer",
"path": "mockafka/producer.py",
"snippet": "class FakeProducer(object):\n def __init__(self, config: dict = None):\n self.kafka = KafkaStore()\n\n def produce(self, topic, value=None, *args, **kwargs):\n # create a message and call produce kafka\n message = Message(value=value, topic=topic, *args, **kwargs)\n self.kafka.produce(message=message, topic=topic, partition=kwargs['partition'])\n\n def list_topics(self, topic=None, *args, **kwargs):\n return ClusterMetadata(topic)\n\n def abort_transaction(self, timeout=None):\n # This method Does not support in mockafka\n pass\n\n def begin_transaction(self):\n # This method Does not support in mockafka\n pass\n\n def commit_transaction(self, timeout=None):\n # This method Does not support in mockafka\n pass\n\n def flush(self, timeout=None):\n # This method Does not support in mockafka\n return 0\n\n def init_transactions(self, timeout=None):\n # This method Does not support in mockafka\n pass\n\n def poll(self, timeout=None):\n # This method Does not support in mockafka\n return 0\n\n def purge(self, in_queue=True, *args, **kwargs):\n # This method Does not support in mockafka\n pass\n\n def send_offsets_to_transaction(self, positions, group_metadata,\n timeout=None):\n # This method Does not support in mockafka\n pass"
}
] | from unittest import TestCase
from mockafka.admin_client import FakeAdminClientImpl
from mockafka.conumser import FakeConsumer
from mockafka.kafka_store import KafkaStore, KafkaException
from mockafka.producer import FakeProducer
import pytest | 5,179 |
class TestFakeConsumer(TestCase):
def setUp(self) -> None:
self.kafka = KafkaStore(clean=True)
self.producer = FakeProducer()
|
class TestFakeConsumer(TestCase):
def setUp(self) -> None:
self.kafka = KafkaStore(clean=True)
self.producer = FakeProducer() | self.consumer = FakeConsumer() | 1 | 2023-10-24 13:27:12+00:00 | 8k |
CuriseJia/FreeStyleRet | test.py | [
{
"identifier": "ShallowStyleRetrieval",
"path": "src/models/style_retrieval.py",
"snippet": "class ShallowStyleRetrieval(nn.Module):\n def __init__(self, model_args):\n super(ShallowStyleRetrieval, self).__init__()\n self.args = model_args\n self.openclip, self.pre_process_train, self.pre_process_val = open_clip.create_model_and_transforms(\n model_name='ViT-L-14', pretrained=self.args.origin_resume)\n self.tokenizer = open_clip.get_tokenizer('ViT-L-14')\n self.openclip.apply(freeze_all_but_bn)\n self.visual = self.openclip.visual\n self.transformer = self.visual.transformer\n # Prompt Token\n self.gram_prompt = nn.Parameter(torch.randn(\n self.args.gram_prompts, self.args.gram_prompt_dim))\n self.gram_encoder = VGG\n self.gram_encoder.load_state_dict(torch.load(self.args.gram_encoder_path))\n self.gram_encoder.apply(freeze_model)\n self.gram_patch = nn.Conv2d(128, 256, 16, 16)\n self.gram_pool = nn.Linear(256, 4)\n self.gram_linear = nn.Sequential(\n nn.Linear(256, 512),\n nn.Linear(512, 1024),\n nn.Linear(1024, self.args.gram_prompt_dim))\n self.style_prompt = nn.Parameter(torch.randn(\n self.args.style_prompts, self.args.style_prompt_dim))\n self.style_patch = nn.Conv2d(256, 256, 16, 16)\n self.style_linear = nn.Sequential(\n nn.Linear(256, 512),\n nn.Linear(512, 1024),\n nn.Linear(1024, self.args.gram_prompt_dim))\n # loss\n self.i2t_loss = nn.TripletMarginWithDistanceLoss(\n distance_function=lambda x, y: 1.0-F.cosine_similarity(x, y), \n margin=1)\n self.t2i_loss = nn.TripletMarginWithDistanceLoss(\n distance_function=lambda x, y: 1.0-F.cosine_similarity(x, y), \n margin=1)\n \n\n def get_loss(self, image_feature, pair_feature, negative_feature, optimizer):\n loss_1 = self.i2t_loss(image_feature, pair_feature, negative_feature)\n loss_2 = self.t2i_loss(pair_feature, image_feature, negative_feature)\n loss = (loss_1 + loss_2) / 2\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n return loss.detach().cpu().numpy()\n \n\n def _get_features(self, image, model, layers=None):\n if layers is None:\n layers = {'0': 'conv1_1', \n '5': 'conv2_1', \n '10': 'conv3_1', \n '19': 'conv4_1', \n '21': 'conv4_2', \n '28': 'conv5_1',\n '31': 'conv5_2'} \n features = {}\n x = image\n for name, layer in model._modules.items():\n x = layer(x) \n if name in layers:\n features[layers[name]] = x\n \n return features\n \n\n def _get_gram_prompt(self, input):\n latent_feature = self._get_features(input, self.gram_encoder)\n embed = self.gram_patch(latent_feature['conv3_1'])\n n, c, h, w = embed.shape # (b, 256, 7, 7)\n\n features = embed.view(n, c, -1) # (b*256, 49)\n features = torch.bmm(features, features.transpose(1, 2))\n features = self.gram_pool(features)\n prompt_feature = self.gram_linear(features.permute(0, 2, 1))\n\n return prompt_feature\n \n\n def _get_style_prompt(self, input):\n # style_feature = torch.tensor(torch.randn(4, 4096))\n feature = torch.from_numpy(np.load(self.args.style_cluster_path)).view(4, 4096).float().to(self.args.device)\n \n gram = self._get_features(input, self.gram_encoder)\n embed = self.gram_patch(gram['conv3_1'])\n n, c, h, w = embed.shape\n gram = embed.view(n, c, -1) # (b*256, 49)\n gram = torch.bmm(gram, gram.transpose(1, 2))\n\n gram = self.gram_pool(gram)\n gram = self.gram_linear(gram.permute(0, 2, 1))\n\n feature = select_style_prompt(gram, feature)\n\n return feature\n \n\n def _visual_forward(self, x):\n gram_prompt = self._get_gram_prompt(x)\n style_prompt = self._get_style_prompt(x)\n\n x = self.visual.conv1(x) # shape = [*, width, grid, grid]\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n\n # class embeddings and positional embeddings\n x = torch.cat(\n [self.visual.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x], dim=1) # shape = [*, grid ** 2 + 1, width]\n x = x + self.visual.positional_embedding.to(x.dtype)\n\n # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in\n x = self.visual.patch_dropout(x)\n x = self.visual.ln_pre(x)\n\n if self.args.prompt_location == 'Shallow':\n\n x = torch.cat([x[:, 0, :].unsqueeze(1), style_prompt, x[:, 1:, :]], dim=1)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.visual.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n \n elif self.args.prompt_location == 'Bottom':\n\n x = x.permute(1, 0, 2) # NLD -> LND\n for r in range(len(self.transformer.resblocks)):\n if r == len(self.transformer.resblocks)-1:\n x = torch.cat([x[0, :, :].unsqueeze(0), \n gram_prompt.permute(1, 0, 2), \n x[1:, :, :]], dim=0)\n x = self.transformer.resblocks[r](x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n # if self.visual.attn_pool is not None:\n # x = self.visual.attn_pool(x)\n # x = self.visual.ln_post(x)\n # pooled, tokens = self.visual._global_pool(x)\n # else:\n pooled, tokens = self.visual._global_pool(x)\n pooled = self.visual.ln_post(pooled)\n\n if self.visual.proj is not None:\n pooled = pooled @ self.visual.proj\n\n # if self.visual.output_tokens:\n # return pooled, tokens\n \n return pooled\n \n\n def forward(self, data, dtype='image'):\n if dtype == 'image': \n feat = self._visual_forward(data)\n\n elif dtype == 'text':\n feat = self.openclip.encode_text(data)\n\n return feat"
},
{
"identifier": "DeepStyleRetrieval",
"path": "src/models/style_retrieval.py",
"snippet": "class DeepStyleRetrieval(nn.Module):\n def __init__(self, model_args):\n super(DeepStyleRetrieval, self).__init__()\n self.args = model_args\n self.openclip, self.pre_process_train, self.pre_process_val = open_clip.create_model_and_transforms(\n model_name='ViT-L-14', pretrained=self.args.origin_resume)\n self.tokenizer = open_clip.get_tokenizer('ViT-L-14')\n self.openclip.apply(freeze_all_but_bn)\n self.visual = self.openclip.visual\n self.transformer = self.visual.transformer\n # Prompt Token\n self.gram_prompt = nn.Parameter(torch.randn(\n self.args.gram_prompts, self.args.gram_prompt_dim))\n self.gram_encoder = VGG\n self.gram_encoder.load_state_dict(torch.load(self.args.gram_encoder_path))\n self.gram_encoder.apply(freeze_model)\n self.gram_patch = nn.Conv2d(128, 256, 16, 16)\n self.gram_pool = nn.Linear(256, 4)\n self.gram_linear = nn.Sequential(\n nn.Linear(256, 512),\n nn.Linear(512, 1024),\n nn.Linear(1024, self.args.gram_prompt_dim))\n self.style_prompt = nn.Parameter(torch.randn(\n self.args.style_prompts, self.args.style_prompt_dim))\n self.style_patch = nn.Conv2d(256, 256, 16, 16)\n self.style_linear = nn.Sequential(\n nn.Linear(256, 512),\n nn.Linear(512, 1024),\n nn.Linear(1024, self.args.gram_prompt_dim))\n # loss\n self.i2t_loss = nn.TripletMarginWithDistanceLoss(\n distance_function=lambda x, y: 1.0-F.cosine_similarity(x, y), \n margin=1)\n self.t2i_loss = nn.TripletMarginWithDistanceLoss(\n distance_function=lambda x, y: 1.0-F.cosine_similarity(x, y), \n margin=1)\n \n\n def get_loss(self, image_feature, pair_feature, negative_feature, optimizer):\n loss_1 = self.i2t_loss(image_feature, pair_feature, negative_feature)\n loss_2 = self.t2i_loss(pair_feature, image_feature, negative_feature)\n loss = (loss_1 + loss_2) / 2\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n return loss.detach().cpu().numpy()\n \n\n def _get_features(self, image, model, layers=None):\n if layers is None:\n layers = {'0': 'conv1_1', \n '5': 'conv2_1', \n '10': 'conv3_1', \n '19': 'conv4_1', \n '21': 'conv4_2', \n '28': 'conv5_1',\n '31': 'conv5_2'} \n features = {}\n x = image\n for name, layer in model._modules.items():\n x = layer(x) \n if name in layers:\n features[layers[name]] = x\n \n return features\n \n\n def _get_gram_prompt(self, input):\n latent_feature = self._get_features(input, self.gram_encoder)\n embed = self.gram_patch(latent_feature['conv3_1'])\n n, c, h, w = embed.shape # (b, 256, 7, 7)\n\n features = embed.view(n, c, -1) # (b*256, 49)\n features = torch.bmm(features, features.transpose(1, 2))\n features = self.gram_pool(features)\n prompt_feature = self.gram_linear(features.permute(0, 2, 1))\n\n return prompt_feature\n \n\n # def _get_style_prompt(self, input):\n # feature = torch.from_numpy(np.load(self.args.style_cluster_path)).view(self.args.style_prompts, 128, 112, 112).float().to(self.args.device) # (4, 1605632)\n # # style_feature = torch.tensor(torch.randn(4, 256, 256))\n # style_feature = self.gram_patch(feature)\n # n, c, h, w = style_feature.shape # (b, 256, 7, 7)\n # style_feature = style_feature.view(n, c, -1) # (b*256, 49)\n # style_feature = torch.bmm(style_feature, style_feature.transpose(1, 2))\n \n # gram = self._get_features(input, self.gram_encoder)\n # embed = self.gram_patch(gram['conv3_1'])\n # n, c, h, w = embed.shape\n # gram = embed.view(n, c, -1) # (b*256, 49)\n # gram = torch.bmm(gram, gram.transpose(1, 2))\n # feature = select_style_prompt(gram, style_feature.view(self.args.style_prompts, -1)) # (b, 65536)\n # feature = self.style_patch(feature.view(self.args.batch_size, 256, 16, 16)).view(self.args.batch_size, 256)\n # feature = self.style_linear(feature).unsqueeze(1).repeat(1, self.args.style_prompts, 1)\n\n # return feature\n\n def _get_style_prompt(self, input):\n # style_feature = torch.tensor(torch.randn(4, 4096))\n feature = torch.from_numpy(np.load(self.args.style_cluster_path)).view(4, 4096).float().to(self.args.device)\n \n gram = self._get_features(input, self.gram_encoder)\n embed = self.gram_patch(gram['conv3_1'])\n n, c, h, w = embed.shape\n gram = embed.view(n, c, -1) # (b*256, 49)\n gram = torch.bmm(gram, gram.transpose(1, 2))\n\n gram = self.gram_pool(gram)\n gram = self.gram_linear(gram.permute(0, 2, 1))\n\n feature = select_style_prompt(gram, feature)\n\n return feature\n\n\n def _visual_forward(self, x):\n input = x\n self.gram_prompt.parameter = self._get_gram_prompt(input)\n self.style_prompt.parameter = self._get_style_prompt(input)\n\n x = self.visual.conv1(x) # shape = [*, width, grid, grid]\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n\n # class embeddings and positional embeddings\n x = torch.cat(\n [self.visual.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x], dim=1) # shape = [*, grid ** 2 + 1, width]\n x = x + self.visual.positional_embedding.to(x.dtype)\n\n # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in\n x = self.visual.patch_dropout(x)\n x = self.visual.ln_pre(x)\n\n # add style_prompt\n x = torch.cat([x[:, 0, :].unsqueeze(1), self.style_prompt.expand(x.shape[0],-1,-1), x[:, 1:, :]], dim=1)\n\n # add gram_prompt before the last block of transformer\n x = x.permute(1, 0, 2) # NLD -> LND\n for r in range(len(self.transformer.resblocks)):\n if r == len(self.transformer.resblocks)-1:\n x = torch.cat([x[0, :, :].unsqueeze(0), \n self.gram_prompt.expand(self.args.batch_size,-1,-1).permute(1, 0, 2), \n x[1:, :, :]], dim=0)\n x = self.transformer.resblocks[r](x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n # if self.visual.attn_pool is not None:\n # x = self.visual.attn_pool(x)\n # x = self.visual.ln_post(x)\n # pooled, tokens = self.visual._global_pool(x)\n # else:\n pooled, tokens = self.visual._global_pool(x)\n pooled = self.visual.ln_post(pooled)\n\n if self.visual.proj is not None:\n pooled = pooled @ self.visual.proj\n\n # if self.visual.output_tokens:\n # return pooled, tokens\n \n return pooled\n \n\n def forward(self, data, dtype='image'):\n if dtype == 'image': \n feat = self._visual_forward(data)\n\n elif dtype == 'text':\n feat = self.openclip.encode_text(data)\n\n return feat"
},
{
"identifier": "BLIP_Retrieval",
"path": "src/models/blip_retrieval.py",
"snippet": "class BLIP_Retrieval(nn.Module):\n def __init__(self, model_args):\n super(BLIP_Retrieval, self).__init__()\n self.args = model_args\n self.blip = blip_retrieval(pretrained=self.args.origin_resume, image_size=224, vit='large', vit_grad_ckpt=True, vit_ckpt_layer=10)\n self.blip.apply(freeze_all_but_bn)\n self.visual = self.blip.visual_encoder.blocks\n # Prompt Token\n self.gram_prompt = nn.Parameter(torch.randn(\n self.args.gram_prompts, self.args.gram_prompt_dim))\n self.gram_encoder = VGG\n self.gram_encoder.load_state_dict(torch.load(self.args.gram_encoder_path))\n self.gram_encoder.apply(freeze_model)\n self.gram_patch = nn.Conv2d(128, 256, 16, 16)\n self.gram_pool = nn.Linear(256, 4)\n self.gram_linear = nn.Sequential(\n nn.Linear(256, 512),\n nn.Linear(512, 1024),\n nn.Linear(1024, self.args.gram_prompt_dim))\n self.style_prompt = nn.Parameter(torch.randn(\n self.args.style_prompts, self.args.style_prompt_dim))\n self.style_patch = nn.Conv2d(256, 256, 16, 16)\n self.style_linear = nn.Sequential(\n nn.Linear(256, 512),\n nn.Linear(512, 1024),\n nn.Linear(1024, self.args.gram_prompt_dim))\n # loss and process\n self.triplet_loss = nn.TripletMarginWithDistanceLoss(\n distance_function=lambda x, y: 1.0-F.cosine_similarity(x, y), \n margin=1)\n self.pre_process_train = image_transform(224, True, image_mean, image_std)\n self.pre_process_val = image_transform(224, False, image_mean, image_std)\n \n\n def _get_features(self, image, model, layers=None):\n if layers is None:\n layers = {'0': 'conv1_1', \n '5': 'conv2_1', \n '10': 'conv3_1', \n '19': 'conv4_1', \n '21': 'conv4_2', \n '28': 'conv5_1',\n '31': 'conv5_2'} \n features = {}\n x = image\n for name, layer in model._modules.items():\n x = layer(x) \n if name in layers:\n features[layers[name]] = x\n \n return features\n\n\n def _get_gram_prompt(self, input):\n latent_feature = self._get_features(input, self.gram_encoder)\n embed = self.gram_patch(latent_feature['conv3_1'])\n n, c, h, w = embed.shape # (b, 256, 7, 7)\n\n features = embed.view(n, c, -1) # (b*256, 49)\n features = torch.bmm(features, features.transpose(1, 2))\n features = self.gram_pool(features)\n prompt_feature = self.gram_linear(features.permute(0, 2, 1))\n\n return prompt_feature\n \n\n def _get_style_prompt(self, input):\n # style_feature = torch.tensor(torch.randn(4, 4096))\n feature = torch.from_numpy(np.load(self.args.style_cluster_path)).view(4, 4096).float().to(self.args.device)\n \n gram = self._get_features(input, self.gram_encoder)\n embed = self.gram_patch(gram['conv3_1'])\n n, c, h, w = embed.shape\n gram = embed.view(n, c, -1) # (b*256, 49)\n gram = torch.bmm(gram, gram.transpose(1, 2))\n\n gram = self.gram_pool(gram)\n gram = self.gram_linear(gram.permute(0, 2, 1))\n\n feature = select_style_prompt(gram, feature)\n\n return feature\n\n\n def forward(self, data, dtype='image'):\n if dtype == 'image':\n gram_prompt = self._get_gram_prompt(data)\n style_prompt = self._get_style_prompt(data)\n\n feat = self.blip.visual_encoder.patch_embed(data)\n cls_tokens = self.blip.visual_encoder.cls_token.expand(data.shape[0], -1, -1)\n feat = torch.cat((cls_tokens, feat), dim=1)\n feat = feat + self.blip.visual_encoder.pos_embed[:,:feat.size(1),:]\n feat = self.blip.visual_encoder.pos_drop(feat)\n\n feat = torch.cat([feat[:, 0, :].unsqueeze(1), style_prompt, feat[:, 1:, :]], dim=1)\n for r in range(len(self.blip.visual_encoder.blocks)):\n if r == len(self.blip.visual_encoder.blocks)-1:\n feat = torch.cat([feat[:, 0, :].unsqueeze(1), \n gram_prompt,\n feat[:, 1:, :]], dim=1)\n feat = self.blip.visual_encoder.blocks[r](feat)\n \n feat = self.blip.visual_encoder.norm(feat)\n \n ori_embed = F.normalize(self.blip.vision_proj(feat[:,0,:]),dim=-1) \n\n return ori_embed\n \n else:\n text = self.blip.tokenizer(data, padding='max_length', truncation=True, max_length=35, \n return_tensors=\"pt\").to(self.args.device)\n text_output = self.blip.text_encoder(text.input_ids, attention_mask = text.attention_mask, \n return_dict = True, mode = 'text')\n text_feat = F.normalize(self.blip.text_proj(text_output.last_hidden_state[:,0,:]),dim=-1)\n\n return text_feat\n \n\n def get_loss(self, image_feature, pair_feature, negative_feature, optimizer):\n loss = self.triplet_loss(image_feature, pair_feature, negative_feature)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n return loss.detach().cpu().numpy()"
},
{
"identifier": "T2ITestDataset",
"path": "src/dataset/data.py",
"snippet": "class T2ITestDataset(Dataset):\n def __init__(self, root_path, json_path, image_transform):\n self.root_path = root_path\n self.dataset = json.load(open(json_path,'r'))\n self.image_transform = image_transform\n \n\n def __len__(self):\n return len(self.dataset)\n \n \n def __getitem__(self, index):\n caption_path = os.path.join(self.root_path, 'text/'+self.dataset[index]['caption'])\n image_path = os.path.join(self.root_path, 'images/'+self.dataset[index]['image'])\n \n f = open(caption_path, 'r')\n caption = f.readline().replace('\\n', '')\n pair_image = self.image_transform(Image.open(image_path))\n\n return [caption, pair_image, index]"
},
{
"identifier": "I2ITestDataset",
"path": "src/dataset/data.py",
"snippet": "class I2ITestDataset(Dataset):\n def __init__(self, style, root_path, json_path, image_transform):\n self.style = style\n self.root_path = root_path\n self.dataset = json.load(open(json_path,'r'))\n self.image_transform = image_transform\n \n\n def __len__(self):\n return len(self.dataset)\n \n \n def __getitem__(self, index):\n ori_path = os.path.join(self.root_path, 'images/'+self.dataset[index]['image'])\n pair_path = os.path.join(self.root_path, '{}/'.format(self.style)+self.dataset[index]['image'])\n \n ori_image = self.image_transform(Image.open(ori_path))\n pair_image = self.image_transform(Image.open(pair_path))\n\n return [ori_image, pair_image, index]"
},
{
"identifier": "X2ITestDataset",
"path": "src/dataset/data.py",
"snippet": "class X2ITestDataset(Dataset):\n def __init__(self, style, root_path, json_path, image_transform):\n self.style = style\n self.root_path = root_path\n self.dataset = json.load(open(json_path,'r'))\n self.image_transform = image_transform\n \n\n def __len__(self):\n return len(self.dataset)\n \n \n def __getitem__(self, index):\n caption_path = os.path.join(self.root_path, 'text/'+self.dataset[index]['caption'])\n ori_path = os.path.join(self.root_path, 'images/'+self.dataset[index]['image'])\n pair_path = os.path.join(self.root_path, '{}/'.format(self.style)+self.dataset[index]['image'])\n \n f = open(caption_path, 'r')\n caption = f.readline().replace('\\n', '')\n ori_image = self.image_transform(Image.open(ori_path))\n pair_image = self.image_transform(Image.open(pair_path))\n\n return [caption, ori_image, pair_image, index]"
},
{
"identifier": "setup_seed",
"path": "src/utils/utils.py",
"snippet": "def setup_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n cudnn.benchmark = True"
},
{
"identifier": "getR1Accuary",
"path": "src/utils/utils.py",
"snippet": "def getR1Accuary(prob):\n temp = prob.detach().cpu().numpy()\n temp = np.argsort(temp, axis=1)\n count = 0\n for i in range(prob.shape[0]):\n if temp[i][prob.shape[1]-1] == i:\n count+=1\n acc = count/prob.shape[0]\n return acc"
},
{
"identifier": "getR5Accuary",
"path": "src/utils/utils.py",
"snippet": "def getR5Accuary(prob):\n temp = prob.detach().cpu().numpy()\n temp = np.argsort(temp, axis=1)\n count = 0\n for i in range(prob.shape[0]):\n for j in range(prob.shape[1]-4,prob.shape[1]):\n if temp[i][j] == i:\n count+=1\n acc = count/prob.shape[0]\n return acc"
}
] | import argparse
import torch
import torch.nn.functional as F
from tqdm import tqdm
from torch.utils.data import DataLoader
from src.models import ShallowStyleRetrieval, DeepStyleRetrieval, BLIP_Retrieval
from src.dataset.data import T2ITestDataset, I2ITestDataset, X2ITestDataset
from src.utils.utils import setup_seed, getR1Accuary, getR5Accuary | 7,072 |
def parse_args():
parser = argparse.ArgumentParser(description='Parse args for FreeStyleRet Training.')
# project settings
parser.add_argument('--resume', default='', type=str, help='load checkpoints from given path')
parser.add_argument('--origin_resume', default='model_large_retrieval_coco.pth', type=str, help='load checkpoints from given path')
parser.add_argument('--gram_encoder_path', default='pretrained/vgg_normalised.pth', type=str, help='load vgg from given path')
parser.add_argument('--style_cluster_path', default='pretrained/style_cluster.npy', type=str, help='load style prompt from given npy')
parser.add_argument('--device', default='cuda:0')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--num_workers', default=6, type=int)
# data settings
parser.add_argument("--type", type=str, default='style2image', help='choose train text2image or style2image.')
parser.add_argument("--style", type=str, default='sketch', help='choose sketch, art or mosaic.')
parser.add_argument("--test_dataset_path", type=str, default='DSR/')
parser.add_argument("--test_json_path", type=str, default='DSR/test.json')
parser.add_argument("--batch_size", type=int, default=24)
# model settings
parser.add_argument('--prompt', type=str, default='DeepPrompt', help='ShallowPrompt or DeepPrompt')
parser.add_argument('--gram_prompts', type=int, default=4)
parser.add_argument('--gram_prompt_dim', type=int, default=1024)
parser.add_argument('--style_prompts', type=int, default=4)
parser.add_argument('--style_prompt_dim', type=int, default=1024)
args = parser.parse_args()
return args
def eval(args, model, dataloader):
model.eval()
r1 = []
r5 = []
if args.type == 'text2image':
for data in enumerate(tqdm(dataloader)):
if args.prompt == 'BLIP_Retrieval':
caption = data[1][0]
else:
caption = model.tokenizer(data[1][0]).to(args.device, non_blocking=True)
image = data[1][1].to(args.device, non_blocking=True)
image_feature = model(image, dtype='image')
text_feature = model(caption, dtype='text')
image_feature = F.normalize(image_feature, dim=-1)
text_feature = F.normalize(text_feature, dim=-1)
prob = torch.softmax((100.0 * text_feature @ image_feature.T), dim=-1)
r1.append(getR1Accuary(prob))
|
def parse_args():
parser = argparse.ArgumentParser(description='Parse args for FreeStyleRet Training.')
# project settings
parser.add_argument('--resume', default='', type=str, help='load checkpoints from given path')
parser.add_argument('--origin_resume', default='model_large_retrieval_coco.pth', type=str, help='load checkpoints from given path')
parser.add_argument('--gram_encoder_path', default='pretrained/vgg_normalised.pth', type=str, help='load vgg from given path')
parser.add_argument('--style_cluster_path', default='pretrained/style_cluster.npy', type=str, help='load style prompt from given npy')
parser.add_argument('--device', default='cuda:0')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--num_workers', default=6, type=int)
# data settings
parser.add_argument("--type", type=str, default='style2image', help='choose train text2image or style2image.')
parser.add_argument("--style", type=str, default='sketch', help='choose sketch, art or mosaic.')
parser.add_argument("--test_dataset_path", type=str, default='DSR/')
parser.add_argument("--test_json_path", type=str, default='DSR/test.json')
parser.add_argument("--batch_size", type=int, default=24)
# model settings
parser.add_argument('--prompt', type=str, default='DeepPrompt', help='ShallowPrompt or DeepPrompt')
parser.add_argument('--gram_prompts', type=int, default=4)
parser.add_argument('--gram_prompt_dim', type=int, default=1024)
parser.add_argument('--style_prompts', type=int, default=4)
parser.add_argument('--style_prompt_dim', type=int, default=1024)
args = parser.parse_args()
return args
def eval(args, model, dataloader):
model.eval()
r1 = []
r5 = []
if args.type == 'text2image':
for data in enumerate(tqdm(dataloader)):
if args.prompt == 'BLIP_Retrieval':
caption = data[1][0]
else:
caption = model.tokenizer(data[1][0]).to(args.device, non_blocking=True)
image = data[1][1].to(args.device, non_blocking=True)
image_feature = model(image, dtype='image')
text_feature = model(caption, dtype='text')
image_feature = F.normalize(image_feature, dim=-1)
text_feature = F.normalize(text_feature, dim=-1)
prob = torch.softmax((100.0 * text_feature @ image_feature.T), dim=-1)
r1.append(getR1Accuary(prob)) | r5.append(getR5Accuary(prob)) | 8 | 2023-10-17 09:32:57+00:00 | 8k |
liuqidong07/MOELoRA-peft | run_mlora.py | [
{
"identifier": "main",
"path": "src/MLoRA/main.py",
"snippet": "def main(parser):\n\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n # If we pass only one argument to the script and it's the path to a json file,\n # let's parse it to get our arguments.\n model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))\n else:\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n\n training_args.batched_training = data_args.batched_training # for batched training\n # if model_args.department: # for the department\n # model_args.task_num = model_args.depart_num\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n handlers=[logging.StreamHandler(sys.stdout)],\n )\n\n if training_args.should_log:\n # The default of training_args.log_level is passive, so we set log level at info here to have that default.\n transformers.utils.logging.set_verbosity_info()\n\n log_level = training_args.get_process_log_level()\n logger.setLevel(log_level)\n # datasets.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.enable_default_handler()\n transformers.utils.logging.enable_explicit_format()\n\n # Log on each process the small summary:\n logger.warning(\n f\"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}\"\n + f\"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}\"\n )\n logger.info(f\"Training/evaluation parameters {training_args}\")\n\n # Set seed before initializing model.\n set_seed(training_args.seed)\n\n # Load dataset\n data_files = {}\n if data_args.train_file is not None:\n data_files[\"train\"] = data_args.train_file\n extension = data_args.train_file.split(\".\")[-1]\n if data_args.validation_file is not None:\n data_files[\"validation\"] = data_args.validation_file\n extension = data_args.validation_file.split(\".\")[-1]\n if data_args.test_file is not None:\n data_files[\"test\"] = data_args.test_file\n extension = data_args.test_file.split(\".\")[-1]\n\n raw_datasets = load_dataset(\n \"json\",\n data_files=data_files,\n cache_dir=model_args.cache_dir,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n print(\"raw_datasets: \", raw_datasets)\n # print(\"raw_datasets: \", len(raw_datasets[\"train\"]))\n\n # Load pretrained model and tokenizer\n config = AutoConfig.from_pretrained(\n model_args.model_name_or_path,\n trust_remote_code=True\n )\n config.pre_seq_len = model_args.pre_seq_len\n config.prefix_projection = model_args.prefix_projection\n\n tokenizer = AutoTokenizer.from_pretrained(\n model_args.model_name_or_path,\n trust_remote_code=True,\n )\n\n model = AutoModel.from_pretrained(\n model_args.model_name_or_path,\n trust_remote_code=True\n ).half().cuda() # .half() represents to use half of orginal accuracy\n\n if model_args.peft_path is not None:\n logger.info(\"Peft from pre-trained model\")\n # Resume_training\n if training_args.resume_from_checkpoint is not None:\n model = PeftModel.from_pretrained(model, model_args.peft_path, is_trainable=True)\n else:\n model = PeftModel.from_pretrained(model, model_args.peft_path, is_trainable=False)\n else:\n logger.info(\"Init new peft model\")\n target_modules = model_args.trainable.split(',')\n modules_to_save = model_args.modules_to_save.split(',') if model_args.modules_to_save!=\"null\" else None\n lora_rank = model_args.lora_rank\n lora_dropout = model_args.lora_dropout\n lora_alpha = model_args.lora_alpha\n print(target_modules)\n\n kwargs = {}\n if model_args.lora_name == \"adalora\":\n TargetLoraConfig = AdaLoraConfig\n task_type = TaskType.CAUSAL_LM\n elif model_args.lora_name == \"moelora\":\n TargetLoraConfig = MMOELoraConfigS\n kwargs = {\n \"task_num\": model_args.task_num,\n \"task_embedding_dim\": model_args.task_embedding_dim,\n \"expert_num\": model_args.expert_num,\n }\n task_type = TaskType.CAUSAL_LMS\n else:\n TargetLoraConfig = LoraConfig\n task_type = TaskType.CAUSAL_LM\n \n peft_config = TargetLoraConfig(\n task_type=task_type,\n target_modules=target_modules,\n inference_mode=False,\n r=lora_rank, lora_alpha=lora_alpha,\n lora_dropout=lora_dropout,\n modules_to_save=modules_to_save,\n **kwargs\n )\n model = get_peft_model(model, peft_config)\n \n\n model.print_trainable_parameters()\n\n task_flag = False # flag whether generate task_id from dataset\n depart_flag = False # flag whether use the department and entity\n if (model_args.lora_name == \"moelora\"):\n task_flag = True\n\n\n prefix = data_args.source_prefix if data_args.source_prefix is not None else \"\"\n\n # Preprocessing the datasets.\n # We need to tokenize inputs and targets.\n if training_args.do_train:\n column_names = raw_datasets[\"train\"].column_names\n elif training_args.do_eval:\n column_names = raw_datasets[\"validation\"].column_names\n elif training_args.do_predict:\n column_names = raw_datasets[\"test\"].column_names\n else:\n logger.info(\"There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.\")\n return\n\n # Get the column names for input/target.\n prompt_column = data_args.prompt_column\n response_column = data_args.response_column\n history_column = data_args.history_column\n \n # Temporarily set max_target_length for training.\n max_target_length = data_args.max_target_length\n\n def print_dataset_example(example):\n print(\"input_ids: \",example[\"input_ids\"])\n print(\"inputs: \", tokenizer.decode(example[\"input_ids\"]))\n print(\"label_ids: \", example[\"labels\"])\n #print(\"labels: \", tokenizer.decode(example[\"labels\"])) # For ChatGLMv2\n \n if model_args.model_name_or_path.split(\"/\")[-1] == \"chatglm-6b\":\n preprocess_function_train = chatglm1_train(data_args, model_args, prompt_column,\n response_column, history_column, prefix,\n tokenizer, task_flag, depart_flag)\n preprocess_function_eval = chatglm1_eval(data_args, model_args, prompt_column,\n response_column, history_column, prefix,\n tokenizer, task_flag, depart_flag)\n elif model_args.model_name_or_path.split(\"/\")[-1] == \"chatglmv2\":\n preprocess_function_train = chatglm2_train(data_args, model_args, prompt_column,\n response_column, history_column, prefix,\n tokenizer, task_flag, model_args.department)\n preprocess_function_eval = chatglm2_eval(data_args, model_args, prompt_column,\n response_column, history_column, prefix,\n tokenizer, task_flag, model_args.department)\n else:\n raise ValueError(\"No such Foundation Model\")\n\n if training_args.do_train:\n if \"train\" not in raw_datasets:\n raise ValueError(\"--do_train requires a train dataset\")\n train_dataset = raw_datasets[\"train\"]\n if data_args.max_train_samples is not None:\n max_train_samples = min(len(train_dataset), data_args.max_train_samples)\n train_dataset = train_dataset.select(range(max_train_samples))\n with training_args.main_process_first(desc=\"train dataset map pre-processing\"):\n train_dataset = train_dataset.map(\n preprocess_function_train,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n remove_columns=column_names,\n load_from_cache_file=False,\n desc=\"Running tokenizer on train dataset\",\n )\n print_dataset_example(train_dataset[0])\n print_dataset_example(train_dataset[1])\n train_dataset.set_format(\"torch\")\n\n if training_args.do_eval:\n max_target_length = data_args.val_max_target_length\n if \"validation\" not in raw_datasets:\n raise ValueError(\"--do_eval requires a validation dataset\")\n eval_dataset = raw_datasets[\"validation\"]\n if data_args.max_eval_samples is not None:\n max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)\n eval_dataset = eval_dataset.select(range(max_eval_samples))\n with training_args.main_process_first(desc=\"validation dataset map pre-processing\"):\n eval_dataset = eval_dataset.map(\n preprocess_function_eval,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n remove_columns=column_names,\n load_from_cache_file=False,\n desc=\"Running tokenizer on validation dataset\",\n )\n print_dataset_example(eval_dataset[0])\n print_dataset_example(eval_dataset[1])\n\n if training_args.do_predict:\n max_target_length = data_args.val_max_target_length\n if \"test\" not in raw_datasets:\n raise ValueError(\"--do_predict requires a test dataset\")\n predict_dataset = raw_datasets[\"test\"]\n if data_args.max_predict_samples is not None:\n max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)\n predict_dataset = predict_dataset.select(range(max_predict_samples))\n with training_args.main_process_first(desc=\"prediction dataset map pre-processing\"):\n predict_dataset = predict_dataset.map(\n preprocess_function_eval,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n remove_columns=column_names,\n load_from_cache_file=False,\n desc=\"Running tokenizer on prediction dataset\",\n )\n print_dataset_example(predict_dataset[0])\n print_dataset_example(predict_dataset[1])\n predict_dataset.set_format(\"torch\")\n\n # Data collator\n label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id\n \n # if training_args.do_train: # only conduct padding for do_train\n # data_collator = DataCollatorForSeq2Seq(\n # tokenizer,\n # model=model,\n # label_pad_token_id=label_pad_token_id,\n # pad_to_multiple_of=tokenizer.pad_token_id,\n # padding=\"longest\",\n # )\n # else:\n if training_args.do_train:\n data_collator = LongestSequenceCollator(tokenizer, task_flag, depart_flag)\n else:\n data_collator = DataCollatorForSeq2Seq(\n tokenizer,\n model=model,\n label_pad_token_id=label_pad_token_id,\n pad_to_multiple_of=None,\n padding=False\n )\n\n # Metric\n def compute_metrics(eval_preds):\n preds, labels = eval_preds\n if isinstance(preds, tuple):\n preds = preds[0]\n decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)\n if data_args.ignore_pad_token_for_loss:\n # Replace -100 in the labels as we can't decode them.\n labels = np.where(labels != -100, labels, tokenizer.pad_token_id)\n decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)\n\n score_dict = {\n \"rouge-1\": 0,\n \"rouge-2\": 0,\n \"rouge-l\": 0,\n \"bleu-4\": 0,\n }\n return score_dict\n\n # Override the decoding parameters of Seq2SeqTrainer\n training_args.generation_max_length = (\n training_args.generation_max_length\n if training_args.generation_max_length is not None\n else data_args.val_max_target_length\n )\n training_args.generation_num_beams = (\n data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams\n )\n # Initialize our Trainer\n trainer = Seq2SeqTrainer(\n model=model,\n args=training_args,\n train_dataset=train_dataset if training_args.do_train else None,\n eval_dataset=eval_dataset if training_args.do_eval else None,\n tokenizer=tokenizer,\n data_collator=data_collator,\n compute_metrics=compute_metrics if training_args.predict_with_generate else None,\n save_prefixencoder=model_args.pre_seq_len is not None\n )\n\n # Training\n if training_args.do_train:\n checkpoint = None\n if training_args.resume_from_checkpoint is not None:\n checkpoint = training_args.resume_from_checkpoint\n model.gradient_checkpointing_enable()\n model.enable_input_require_grads()\n train_result = trainer.train(resume_from_checkpoint=checkpoint)\n\n metrics = train_result.metrics\n max_train_samples = (\n data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)\n )\n metrics[\"train_samples\"] = min(max_train_samples, len(train_dataset))\n\n trainer.log_metrics(\"train\", metrics)\n trainer.save_metrics(\"train\", metrics)\n trainer.save_state()\n\n # Evaluation\n results = {}\n if training_args.do_eval:\n logger.info(\"*** Evaluate ***\")\n metrics = trainer.evaluate(metric_key_prefix=\"eval\", do_sample=True, top_p=0.7, max_length=512, temperature=0.95)\n max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)\n metrics[\"eval_samples\"] = min(max_eval_samples, len(eval_dataset))\n\n trainer.log_metrics(\"eval\", metrics)\n trainer.save_metrics(\"eval\", metrics)\n\n if training_args.do_predict:\n logger.info(\"*** Predict ***\")\n\n # 读取原test file\n list_test_samples = []\n with open(data_args.test_file, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n line = json.loads(line)\n list_test_samples.append(line)\n\n predict_results = trainer.predict(\n predict_dataset,\n metric_key_prefix=\"predict\",\n # max_tokens=512,\n max_new_tokens=data_args.max_target_length,\n do_sample=True,\n top_p=0.7,\n temperature=0.95,\n # repetition_penalty=1.1\n )\n metrics = predict_results.metrics\n print(metrics)\n max_predict_samples = (\n data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)\n )\n metrics[\"predict_samples\"] = min(max_predict_samples, len(predict_dataset))\n\n #trainer.log_metrics(\"predict\", metrics)\n #trainer.save_metrics(\"predict\", metrics)\n\n if trainer.is_world_process_zero():\n if training_args.predict_with_generate:\n predictions = tokenizer.batch_decode(\n predict_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True\n )\n predictions = [pred.strip() for pred in predictions]\n labels = tokenizer.batch_decode(\n predict_results.label_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True\n )\n labels = [label.strip() for label in labels]\n assert len(labels) == len(list_test_samples)\n\n output_prediction_file = os.path.join(training_args.output_dir, \"test_predictions.json\")\n\n with open(output_prediction_file, \"w\", encoding=\"utf-8\") as writer:\n for idx, (p, l) in enumerate(zip(predictions, labels)):\n samp = list_test_samples[idx]\n samp[\"target\"] = p\n res = json.dumps(samp, ensure_ascii=False)\n writer.write(f\"{res}\\n\")\n\n return results"
},
{
"identifier": "ModelArguments",
"path": "src/MLoRA/arguments.py",
"snippet": "class ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n metadata={\"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n lora_name: Optional[str] = field(\n default=\"lora\", metadata={\"help\": \"LoRA Type\"}\n )\n ptuning_checkpoint: str = field(\n default=None, metadata={\"help\": \"Path to p-tuning v2 checkpoints\"}\n )\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n cache_dir: Optional[str] = field(\n default=None,\n metadata={\"help\": \"Where to store the pretrained models downloaded from huggingface.co\"},\n )\n use_fast_tokenizer: bool = field(\n default=True,\n metadata={\"help\": \"Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.\"},\n )\n model_revision: str = field(\n default=\"main\",\n metadata={\"help\": \"The specific model version to use (can be a branch name, tag name or commit id).\"},\n )\n use_auth_token: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Will use the token generated when running `huggingface-cli login` (necessary to use this script \"\n \"with private models).\"\n )\n },\n )\n resize_position_embeddings: Optional[bool] = field(\n default=None,\n metadata={\n \"help\": (\n \"Whether to automatically resize the position embeddings if `max_source_length` exceeds \"\n \"the model's position embeddings.\"\n )\n },\n )\n quantization_bit: Optional[int] = field(\n default=None\n )\n pre_seq_len: Optional[int] = field(\n default=None\n )\n prefix_projection: bool = field(\n default=False\n )\n\n trainable: Optional[str] = field(default=\"q_proj,v_proj\")\n lora_rank: Optional[int] = field(default=8)\n lora_dropout: Optional[float] = field(default=0.1)\n lora_alpha: Optional[float] = field(default=32.)\n modules_to_save: Optional[str] = field(default='embed_tokens,lm_head')\n debug_mode: Optional[bool] = field(default=False)\n peft_path: Optional[str] = field(default=None)\n task_num: Optional[int] = field(default=16)\n task_embedding_dim: Optional[int] = field(default=64)\n expert_num: Optional[int] = field(default=4)\n knowledge_r: Optional[int] = field(default=8)\n kmoe_path: Optional[str] = field(default=\"m_saved/lora-0725/checkpoint-8000\")\n freeze: Optional[bool] = field(default=False)\n department: Optional[bool] = field(default=False)\n depart_num: Optional[int] = field(default=16)\n entity_num: Optional[int] = field(default=26)\n bias_weight: Optional[float] = field(default=1)"
},
{
"identifier": "DataTrainingArguments",
"path": "src/MLoRA/arguments.py",
"snippet": "class DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n batched_training: bool = field(\n default=False, metadata={\"help\": \"Use the batched training.\"}\n )\n\n lang: Optional[str] = field(default=None, metadata={\"help\": \"Language id for summarization.\"})\n\n dataset_name: Optional[str] = field(\n default=None, metadata={\"help\": \"The name of the dataset to use (via the datasets library).\"}\n )\n dataset_config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"The configuration name of the dataset to use (via the datasets library).\"}\n )\n prompt_column: Optional[str] = field(\n default=None,\n metadata={\"help\": \"The name of the column in the datasets containing the full texts (for summarization).\"},\n )\n response_column: Optional[str] = field(\n default=None,\n metadata={\"help\": \"The name of the column in the datasets containing the summaries (for summarization).\"},\n )\n history_column: Optional[str] = field(\n default=None,\n metadata={\"help\": \"The name of the column in the datasets containing the history of chat.\"},\n )\n train_file: Optional[str] = field(\n default=None, metadata={\"help\": \"The input training data file (a jsonlines or csv file).\"}\n )\n validation_file: Optional[str] = field(\n default=None,\n metadata={\n \"help\": (\n \"An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file).\"\n )\n },\n )\n test_file: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file).\"\n },\n )\n overwrite_cache: bool = field(\n default=True, metadata={\"help\": \"Overwrite the cached training and evaluation sets\"}\n )\n preprocessing_num_workers: Optional[int] = field(\n default=None,\n metadata={\"help\": \"The number of processes to use for the preprocessing.\"},\n )\n max_source_length: Optional[int] = field(\n default=1024,\n metadata={\n \"help\": (\n \"The maximum total input sequence length after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n )\n },\n )\n max_target_length: Optional[int] = field(\n default=128,\n metadata={\n \"help\": (\n \"The maximum total sequence length for target text after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n )\n },\n )\n val_max_target_length: Optional[int] = field(\n default=None,\n metadata={\n \"help\": (\n \"The maximum total sequence length for validation target text after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`.\"\n \"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used \"\n \"during ``evaluate`` and ``predict``.\"\n )\n },\n )\n pad_to_max_length: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to pad all samples to model maximum sentence length. \"\n \"If False, will pad the samples dynamically when batching to the maximum length in the batch. More \"\n \"efficient on GPU but very bad for TPU.\"\n )\n },\n )\n max_train_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": (\n \"For debugging purposes or quicker training, truncate the number of training examples to this \"\n \"value if set.\"\n )\n },\n )\n max_eval_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": (\n \"For debugging purposes or quicker training, truncate the number of evaluation examples to this \"\n \"value if set.\"\n )\n },\n )\n max_predict_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": (\n \"For debugging purposes or quicker training, truncate the number of prediction examples to this \"\n \"value if set.\"\n )\n },\n )\n num_beams: Optional[int] = field(\n default=None,\n metadata={\n \"help\": (\n \"Number of beams to use for evaluation. This argument will be passed to ``model.generate``, \"\n \"which is used during ``evaluate`` and ``predict``.\"\n )\n },\n )\n ignore_pad_token_for_loss: bool = field(\n default=True,\n metadata={\n \"help\": \"Whether to ignore the tokens corresponding to padded labels in the loss computation or not.\"\n },\n )\n source_prefix: Optional[str] = field(\n default=\"\", metadata={\"help\": \"A prefix to add before every source text (useful for T5 models).\"}\n )\n\n forced_bos_token: Optional[str] = field(\n default=None,\n metadata={\n \"help\": (\n \"The token to force as the first generated token after the decoder_start_token_id.\"\n \"Useful for multilingual models like mBART where the first generated token\"\n \"needs to be the target language token (Usually it is the target language token)\"\n )\n },\n )\n\n \n\n def __post_init__(self):\n if self.dataset_name is None and self.train_file is None and self.validation_file is None and self.test_file is None:\n raise ValueError(\"Need either a dataset name or a training/validation/test file.\")\n else:\n if self.train_file is not None:\n extension = self.train_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`train_file` should be a csv or a json file.\"\n if self.validation_file is not None:\n extension = self.validation_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`validation_file` should be a csv or a json file.\"\n if self.val_max_target_length is None:\n self.val_max_target_length = self.max_target_length"
}
] | import os
from src.MLoRA.main import main
from transformers import HfArgumentParser, Seq2SeqTrainingArguments
from src.MLoRA.arguments import ModelArguments, DataTrainingArguments | 6,142 | # -*- encoding: utf-8 -*-
# here put the import lib
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
if __name__ == "__main__":
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
| # -*- encoding: utf-8 -*-
# here put the import lib
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
if __name__ == "__main__":
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
| main(parser) | 0 | 2023-10-19 10:55:50+00:00 | 8k |
voyage-ai/voyageai-python | voyageai/api_resources/abstract/engine_api_resource.py | [
{
"identifier": "error",
"path": "voyageai/error.py",
"snippet": "class VoyageError(Exception):\nclass APIError(VoyageError):\nclass TryAgain(VoyageError):\nclass Timeout(VoyageError):\nclass APIConnectionError(VoyageError):\nclass InvalidRequestError(VoyageError):\nclass MalformedRequestError(VoyageError):\nclass AuthenticationError(VoyageError):\nclass RateLimitError(VoyageError):\nclass ServerError(VoyageError):\nclass ServiceUnavailableError(VoyageError):\n def __init__(\n self,\n message=None,\n http_body=None,\n http_status=None,\n json_body=None,\n headers=None,\n code=None,\n ):\n def __str__(self):\n def user_message(self):\n def __repr__(self):\n def construct_error_object(self):\n def __init__(\n self,\n message,\n http_body=None,\n http_status=None,\n json_body=None,\n headers=None,\n code=None,\n should_retry=False,\n ):"
},
{
"identifier": "util",
"path": "voyageai/util.py",
"snippet": "VOYAGE_LOG = os.environ.get(\"VOYAGE_LOG\")\n VOYAGE = 1\nclass ApiType(Enum):\n def from_str(label):\ndef _console_log_level():\ndef log_debug(message, **params):\ndef log_info(message, **params):\ndef log_warn(message, **params):\ndef logfmt(props):\n def fmt(key, val):\ndef convert_to_voyage_object(resp):\ndef convert_to_dict(obj):\ndef merge_dicts(x, y):\ndef default_api_key() -> str:"
},
{
"identifier": "api_requestor",
"path": "voyageai/api_resources/api_requestor.py",
"snippet": "TIMEOUT_SECS = 600\nMAX_SESSION_LIFETIME_SECS = 180\nMAX_CONNECTION_RETRIES = 2\ndef _build_api_url(url, query):\ndef _requests_proxies_arg(proxy) -> Optional[Dict[str, str]]:\ndef _aiohttp_proxies_arg(proxy) -> Optional[str]:\ndef _make_session() -> requests.Session:\ndef parse_stream_helper(line: bytes) -> Optional[str]:\ndef parse_stream(rbody: Iterator[bytes]) -> Iterator[str]:\nasync def parse_stream_async(rbody: aiohttp.StreamReader):\n def __init__(\n self,\n key=None,\n api_base=None,\n api_type=None,\n api_version=None,\n organization=None,\n ):\n def format_app_info(cls, info):\n def _check_polling_response(self, response: VoyageResponse, predicate: Callable[[VoyageResponse], bool]):\n def _poll(\n self,\n method,\n url,\n until,\n failed,\n params = None,\n headers = None,\n interval = None,\n delay = None\n ) -> Tuple[Iterator[VoyageResponse], bool, str]:\n async def _apoll(\n self,\n method,\n url,\n until,\n failed,\n params = None,\n headers = None,\n interval = None,\n delay = None\n ) -> Tuple[Iterator[VoyageResponse], bool, str]:\n def request(\n self,\n method,\n url,\n params,\n headers,\n files,\n stream: Literal[True],\n request_id: Optional[str] = ...,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,\n ) -> Tuple[Iterator[VoyageResponse], bool, str]:\n def request(\n self,\n method,\n url,\n params=...,\n headers=...,\n files=...,\n *,\n stream: Literal[True],\n request_id: Optional[str] = ...,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,\n ) -> Tuple[Iterator[VoyageResponse], bool, str]:\n def request(\n self,\n method,\n url,\n params=...,\n headers=...,\n files=...,\n stream: Literal[False] = ...,\n request_id: Optional[str] = ...,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,\n ) -> Tuple[VoyageResponse, bool, str]:\n def request(\n self,\n method,\n url,\n params=...,\n headers=...,\n files=...,\n stream: bool = ...,\n request_id: Optional[str] = ...,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,\n ) -> Tuple[Union[VoyageResponse, Iterator[VoyageResponse]], bool, str]:\n def request(\n self,\n method,\n url,\n params=None,\n headers=None,\n files=None,\n stream: bool = False,\n request_id: Optional[str] = None,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = None,\n ) -> Tuple[Union[VoyageResponse, Iterator[VoyageResponse]], bool, str]:\n async def arequest(\n self,\n method,\n url,\n params,\n headers,\n files,\n stream: Literal[True],\n request_id: Optional[str] = ...,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,\n ) -> Tuple[AsyncGenerator[VoyageResponse, None], bool, str]:\n async def arequest(\n self,\n method,\n url,\n params=...,\n headers=...,\n files=...,\n *,\n stream: Literal[True],\n request_id: Optional[str] = ...,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,\n ) -> Tuple[AsyncGenerator[VoyageResponse, None], bool, str]:\n async def arequest(\n self,\n method,\n url,\n params=...,\n headers=...,\n files=...,\n stream: Literal[False] = ...,\n request_id: Optional[str] = ...,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,\n ) -> Tuple[VoyageResponse, bool, str]:\n async def arequest(\n self,\n method,\n url,\n params=...,\n headers=...,\n files=...,\n stream: bool = ...,\n request_id: Optional[str] = ...,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,\n ) -> Tuple[Union[VoyageResponse, AsyncGenerator[VoyageResponse, None]], bool, str]:\n async def arequest(\n self,\n method,\n url,\n params=None,\n headers=None,\n files=None,\n stream: bool = False,\n request_id: Optional[str] = None,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = None,\n ) -> Tuple[Union[VoyageResponse, AsyncGenerator[VoyageResponse, None]], bool, str]:\n async def wrap_resp():\n def request_headers(\n self, method: str, extra, request_id: Optional[str]\n ) -> Dict[str, str]:\n def _validate_headers(\n self, supplied_headers: Optional[Dict[str, str]]\n ) -> Dict[str, str]:\n def _prepare_request_raw(\n self,\n url,\n supplied_headers,\n method,\n params,\n files,\n request_id: Optional[str],\n ) -> Tuple[str, Dict[str, str], Optional[bytes]]:\n def request_raw(\n self,\n method,\n url,\n *,\n params=None,\n supplied_headers: Optional[Dict[str, str]] = None,\n files=None,\n stream: bool = False,\n request_id: Optional[str] = None,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = None,\n ) -> requests.Response:\n async def arequest_raw(\n self,\n method,\n url,\n session,\n *,\n params=None,\n supplied_headers: Optional[Dict[str, str]] = None,\n files=None,\n request_id: Optional[str] = None,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = None,\n ) -> aiohttp.ClientResponse:\n def _interpret_response(\n self, result: requests.Response, stream: bool\n ) -> Tuple[Union[VoyageResponse, Iterator[VoyageResponse]], bool]:\n async def _interpret_async_response(\n self, result: aiohttp.ClientResponse, stream: bool\n ) -> Tuple[Union[VoyageResponse, AsyncGenerator[VoyageResponse, None]], bool]:\n def _interpret_response_line(\n self, rbody: str, rcode: int, rheaders, stream: bool\n ) -> VoyageResponse:\n def handle_error_response(self, rbody, rcode, resp, rheaders, stream_error=False):\n def __init__(self):\n async def __aenter__(self):\n async def __aexit__(self, exc_type, exc_value, traceback):\nclass APIRequestor:\nclass AioHTTPSession(AsyncContextManager):"
},
{
"identifier": "APIResource",
"path": "voyageai/api_resources/abstract/api_resource.py",
"snippet": "class APIResource(VoyageObject):\n api_prefix = \"\"\n\n @classmethod\n def retrieve(\n cls, id, api_key=None, request_id=None, request_timeout=None, **params\n ):\n instance = cls(id=id, api_key=api_key, **params)\n instance.refresh(request_id=request_id, request_timeout=request_timeout)\n return instance\n\n @classmethod\n def aretrieve(\n cls, id, api_key=None, request_id=None, request_timeout=None, **params\n ):\n instance = cls(id=id, api_key=api_key, **params)\n return instance.arefresh(request_id=request_id, request_timeout=request_timeout)\n\n def refresh(self, request_id=None, request_timeout=None):\n self.refresh_from(\n self.request(\n \"get\",\n self.instance_url(),\n request_id=request_id,\n request_timeout=request_timeout,\n )\n )\n return self\n\n async def arefresh(self, request_id=None, request_timeout=None):\n self.refresh_from(\n await self.arequest(\n \"get\",\n self.instance_url(operation=\"refresh\"),\n request_id=request_id,\n request_timeout=request_timeout,\n )\n )\n return self\n\n @classmethod\n def class_url(cls):\n if cls == APIResource:\n raise NotImplementedError(\n \"APIResource is an abstract class. You should perform actions on its subclasses.\"\n )\n # Namespaces are separated in object names with periods (.) and in URLs\n # with forward slashes (/), so replace the former with the latter.\n base = cls.OBJECT_NAME.replace(\".\", \"/\") # type: ignore\n if cls.api_prefix:\n return \"/%s/%s\" % (cls.api_prefix, base)\n return \"/%s\" % (base)\n\n def instance_url(self, operation=None):\n id = self.get(\"id\")\n\n if not isinstance(id, str):\n raise error.InvalidRequestError(\n \"Could not determine which URL to request: %s instance \"\n \"has invalid ID: %r, %s. ID should be of type `str` (or\"\n \" `unicode`)\" % (type(self).__name__, id, type(id)),\n \"id\",\n )\n api_version = self.api_version or voyageai.api_version\n extn = quote_plus(id)\n\n if self.typed_api_type == ApiType.VOYAGE:\n base = self.class_url()\n return \"%s/%s\" % (base, extn)\n\n else:\n raise error.InvalidAPIType(\"Unsupported API type %s\" % self.api_type)\n\n # The `method_` and `url_` arguments are suffixed with an underscore to\n # avoid conflicting with actual request parameters in `params`.\n @classmethod\n def _static_request(\n cls,\n method_,\n url_,\n api_key=None,\n api_base=None,\n api_type=None,\n request_id=None,\n api_version=None,\n organization=None,\n **params,\n ):\n requestor = api_requestor.APIRequestor(\n api_key,\n api_version=api_version,\n organization=organization,\n api_base=api_base,\n api_type=api_type,\n )\n response, _, api_key = requestor.request(\n method_, url_, params, request_id=request_id\n )\n return util.convert_to_voyage_object(\n response, api_key, api_version, organization\n )\n\n @classmethod\n async def _astatic_request(\n cls,\n method_,\n url_,\n api_key=None,\n api_base=None,\n api_type=None,\n request_id=None,\n api_version=None,\n organization=None,\n **params,\n ):\n requestor = api_requestor.APIRequestor(\n api_key,\n api_version=api_version,\n organization=organization,\n api_base=api_base,\n api_type=api_type,\n )\n response, _, api_key = await requestor.arequest(\n method_, url_, params, request_id=request_id\n )\n return response\n\n @classmethod\n def _get_api_type_and_version(\n cls, api_type: Optional[str] = None, api_version: Optional[str] = None\n ):\n typed_api_type = (\n ApiType.from_str(api_type)\n if api_type\n else ApiType.from_str(voyageai.api_type)\n )\n typed_api_version = api_version or voyageai.api_version\n return (typed_api_type, typed_api_version)"
},
{
"identifier": "ApiType",
"path": "voyageai/util.py",
"snippet": "class ApiType(Enum):\n VOYAGE = 1\n\n @staticmethod\n def from_str(label):\n if label.lower() == \"voyage\":\n return ApiType.VOYAGE\n else:\n raise voyageai.error.InvalidAPIType(\n \"The API type provided in invalid. Please select one of the supported API types: 'voyage'\"\n )"
}
] | import time
from pydoc import apropos
from typing import Optional
from urllib.parse import quote_plus
from voyageai import error, util
from voyageai.api_resources import api_requestor
from voyageai.api_resources.abstract.api_resource import APIResource
from voyageai.util import ApiType | 3,720 |
MAX_TIMEOUT = 20
class EngineAPIResource(APIResource):
plain_old_data = False
def __init__(self, engine: Optional[str] = None, **kwargs):
super().__init__(engine=engine, **kwargs)
@classmethod
def class_url(
cls,
engine: Optional[str] = None,
api_type: Optional[str] = None,
api_version: Optional[str] = None,
):
# Namespaces are separated in object names with periods (.) and in URLs
# with forward slashes (/), so replace the former with the latter.
base = cls.OBJECT_NAME.replace(".", "/") # type: ignore
return "/%s" % (base)
@classmethod
def __prepare_create_request(
cls,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
**params,
):
deployment_id = params.pop("deployment_id", None)
engine = params.pop("engine", deployment_id)
model = params.get("model", None)
timeout = params.pop("timeout", None)
stream = params.get("stream", False)
headers = params.pop("headers", None)
request_timeout = params.pop("request_timeout", None)
typed_api_type = cls._get_api_type_and_version(api_type=api_type)[0]
if model is None and engine is None:
|
MAX_TIMEOUT = 20
class EngineAPIResource(APIResource):
plain_old_data = False
def __init__(self, engine: Optional[str] = None, **kwargs):
super().__init__(engine=engine, **kwargs)
@classmethod
def class_url(
cls,
engine: Optional[str] = None,
api_type: Optional[str] = None,
api_version: Optional[str] = None,
):
# Namespaces are separated in object names with periods (.) and in URLs
# with forward slashes (/), so replace the former with the latter.
base = cls.OBJECT_NAME.replace(".", "/") # type: ignore
return "/%s" % (base)
@classmethod
def __prepare_create_request(
cls,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
**params,
):
deployment_id = params.pop("deployment_id", None)
engine = params.pop("engine", deployment_id)
model = params.get("model", None)
timeout = params.pop("timeout", None)
stream = params.get("stream", False)
headers = params.pop("headers", None)
request_timeout = params.pop("request_timeout", None)
typed_api_type = cls._get_api_type_and_version(api_type=api_type)[0]
if model is None and engine is None: | raise error.InvalidRequestError( | 0 | 2023-10-17 22:11:18+00:00 | 8k |
YuroFR/freqtrade-modded-crypto-trading-bot | tests/optimize/test_recursive_analysis.py | [
{
"identifier": "start_recursive_analysis",
"path": "freqtrade/commands/optimize_commands.py",
"snippet": "def start_recursive_analysis(args: Dict[str, Any]) -> None:\n \"\"\"\n Start the backtest recursive tester script\n :param args: Cli args from Arguments()\n :return: None\n \"\"\"\n from freqtrade.optimize.analysis.recursive_helpers import RecursiveAnalysisSubFunctions\n\n config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)\n RecursiveAnalysisSubFunctions.start(config)"
},
{
"identifier": "get_timerange",
"path": "freqtrade/data/history/history_utils.py",
"snippet": "def get_timerange(data: Dict[str, DataFrame]) -> Tuple[datetime, datetime]:\n \"\"\"\n Get the maximum common timerange for the given backtest data.\n\n :param data: dictionary with preprocessed backtesting data\n :return: tuple containing min_date, max_date\n \"\"\"\n timeranges = [\n (frame['date'].min().to_pydatetime(), frame['date'].max().to_pydatetime())\n for frame in data.values()\n ]\n return (min(timeranges, key=operator.itemgetter(0))[0],\n max(timeranges, key=operator.itemgetter(1))[1])"
},
{
"identifier": "OperationalException",
"path": "freqtrade/exceptions.py",
"snippet": "class OperationalException(FreqtradeException):\n \"\"\"\n Requires manual intervention and will stop the bot.\n Most of the time, this is caused by an invalid Configuration.\n \"\"\""
},
{
"identifier": "RecursiveAnalysis",
"path": "freqtrade/optimize/analysis/recursive.py",
"snippet": "class RecursiveAnalysis(BaseAnalysis):\n\n def __init__(self, config: Dict[str, Any], strategy_obj: Dict):\n\n self._startup_candle = config.get('startup_candle', [199, 399, 499, 999, 1999])\n\n super().__init__(config, strategy_obj)\n\n self.partial_varHolder_array: List[VarHolder] = []\n self.partial_varHolder_lookahead_array: List[VarHolder] = []\n\n self.dict_recursive: Dict[str, Any] = dict()\n\n # For recursive bias check\n # analyzes two data frames with processed indicators and shows differences between them.\n def analyze_indicators(self):\n\n pair_to_check = self.local_config['pairs'][0]\n logger.info(\"Start checking for recursive bias\")\n\n # check and report signals\n base_last_row = self.full_varHolder.indicators[pair_to_check].iloc[-1]\n\n for part in self.partial_varHolder_array:\n part_last_row = part.indicators[pair_to_check].iloc[-1]\n\n compare_df = base_last_row.compare(part_last_row)\n if compare_df.shape[0] > 0:\n # print(compare_df)\n for col_name, values in compare_df.items():\n # print(col_name)\n if 'other' == col_name:\n continue\n indicators = values.index\n\n for indicator in indicators:\n if (indicator not in self.dict_recursive):\n self.dict_recursive[indicator] = {}\n\n values_diff = compare_df.loc[indicator]\n values_diff_self = values_diff.loc['self']\n values_diff_other = values_diff.loc['other']\n diff = (values_diff_other - values_diff_self) / values_diff_self * 100\n\n self.dict_recursive[indicator][part.startup_candle] = f\"{diff:.3f}%\"\n\n else:\n logger.info(\"No variance on indicator(s) found due to recursive formula.\")\n break\n\n # For lookahead bias check\n # analyzes two data frames with processed indicators and shows differences between them.\n def analyze_indicators_lookahead(self):\n\n pair_to_check = self.local_config['pairs'][0]\n logger.info(\"Start checking for lookahead bias on indicators only\")\n\n part = self.partial_varHolder_lookahead_array[0]\n part_last_row = part.indicators[pair_to_check].iloc[-1]\n date_to_check = part_last_row['date']\n index_to_get = (self.full_varHolder.indicators[pair_to_check]['date'] == date_to_check)\n base_row_check = self.full_varHolder.indicators[pair_to_check].loc[index_to_get].iloc[-1]\n\n check_time = part.to_dt.strftime('%Y-%m-%dT%H:%M:%S')\n\n logger.info(f\"Check indicators at {check_time}\")\n # logger.info(f\"vs {part_timerange} with {part.startup_candle} startup candle\")\n\n compare_df = base_row_check.compare(part_last_row)\n if compare_df.shape[0] > 0:\n # print(compare_df)\n for col_name, values in compare_df.items():\n # print(col_name)\n if 'other' == col_name:\n continue\n indicators = values.index\n\n for indicator in indicators:\n logger.info(f\"=> found lookahead in indicator {indicator}\")\n # logger.info(\"base value {:.5f}\".format(values_diff_self))\n # logger.info(\"part value {:.5f}\".format(values_diff_other))\n\n else:\n logger.info(\"No lookahead bias on indicators found.\")\n\n def prepare_data(self, varholder: VarHolder, pairs_to_load: List[DataFrame]):\n\n if 'freqai' in self.local_config and 'identifier' in self.local_config['freqai']:\n # purge previous data if the freqai model is defined\n # (to be sure nothing is carried over from older backtests)\n path_to_current_identifier = (\n Path(f\"{self.local_config['user_data_dir']}/models/\"\n f\"{self.local_config['freqai']['identifier']}\").resolve())\n # remove folder and its contents\n if Path.exists(path_to_current_identifier):\n shutil.rmtree(path_to_current_identifier)\n\n prepare_data_config = deepcopy(self.local_config)\n prepare_data_config['timerange'] = (str(self.dt_to_timestamp(varholder.from_dt)) + \"-\" +\n str(self.dt_to_timestamp(varholder.to_dt)))\n prepare_data_config['exchange']['pair_whitelist'] = pairs_to_load\n\n backtesting = Backtesting(prepare_data_config, self.exchange)\n self.exchange = backtesting.exchange\n backtesting._set_strategy(backtesting.strategylist[0])\n\n varholder.data, varholder.timerange = backtesting.load_bt_data()\n backtesting.load_bt_data_detail()\n varholder.timeframe = backtesting.timeframe\n\n varholder.indicators = backtesting.strategy.advise_all_indicators(varholder.data)\n\n def fill_partial_varholder(self, start_date, startup_candle):\n logger.info(f\"Calculating indicators using startup candle of {startup_candle}.\")\n partial_varHolder = VarHolder()\n\n partial_varHolder.from_dt = start_date\n partial_varHolder.to_dt = self.full_varHolder.to_dt\n partial_varHolder.startup_candle = startup_candle\n\n self.local_config['startup_candle_count'] = startup_candle\n\n self.prepare_data(partial_varHolder, self.local_config['pairs'])\n\n self.partial_varHolder_array.append(partial_varHolder)\n\n def fill_partial_varholder_lookahead(self, end_date):\n logger.info(\"Calculating indicators to test lookahead on indicators.\")\n\n partial_varHolder = VarHolder()\n\n partial_varHolder.from_dt = self.full_varHolder.from_dt\n partial_varHolder.to_dt = end_date\n\n self.prepare_data(partial_varHolder, self.local_config['pairs'])\n\n self.partial_varHolder_lookahead_array.append(partial_varHolder)\n\n def start(self) -> None:\n\n super().start()\n\n reduce_verbosity_for_bias_tester()\n start_date_full = self.full_varHolder.from_dt\n end_date_full = self.full_varHolder.to_dt\n\n timeframe_minutes = timeframe_to_minutes(self.full_varHolder.timeframe)\n\n end_date_partial = start_date_full + timedelta(minutes=int(timeframe_minutes * 10))\n\n self.fill_partial_varholder_lookahead(end_date_partial)\n\n # restore_verbosity_for_bias_tester()\n\n start_date_partial = end_date_full - timedelta(minutes=int(timeframe_minutes))\n\n for startup_candle in self._startup_candle:\n self.fill_partial_varholder(start_date_partial, int(startup_candle))\n\n # Restore verbosity, so it's not too quiet for the next strategy\n restore_verbosity_for_bias_tester()\n\n self.analyze_indicators()\n self.analyze_indicators_lookahead()"
},
{
"identifier": "RecursiveAnalysisSubFunctions",
"path": "freqtrade/optimize/analysis/recursive_helpers.py",
"snippet": "class RecursiveAnalysisSubFunctions:\n\n @staticmethod\n def text_table_recursive_analysis_instances(\n recursive_instances: List[RecursiveAnalysis]):\n startups = recursive_instances[0]._startup_candle\n headers = ['indicators']\n for candle in startups:\n headers.append(candle)\n\n data = []\n for inst in recursive_instances:\n if len(inst.dict_recursive) > 0:\n for indicator, values in inst.dict_recursive.items():\n temp_data = [indicator]\n for candle in startups:\n temp_data.append(values.get(int(candle), '-'))\n data.append(temp_data)\n\n if len(data) > 0:\n from tabulate import tabulate\n table = tabulate(data, headers=headers, tablefmt=\"orgtbl\")\n print(table)\n return table, headers, data\n\n return None, None, data\n\n @staticmethod\n def calculate_config_overrides(config: Config):\n if 'timerange' not in config:\n # setting a timerange is enforced here\n raise OperationalException(\n \"Please set a timerange. \"\n \"A timerange of 5000 candles are enough for recursive analysis.\"\n )\n\n if config.get('backtest_cache') is None:\n config['backtest_cache'] = 'none'\n elif config['backtest_cache'] != 'none':\n logger.info(f\"backtest_cache = \"\n f\"{config['backtest_cache']} detected. \"\n f\"Inside recursive-analysis it is enforced to be 'none'. \"\n f\"Changed it to 'none'\")\n config['backtest_cache'] = 'none'\n return config\n\n @staticmethod\n def initialize_single_recursive_analysis(config: Config, strategy_obj: Dict[str, Any]):\n\n logger.info(f\"Recursive test of {Path(strategy_obj['location']).name} started.\")\n start = time.perf_counter()\n current_instance = RecursiveAnalysis(config, strategy_obj)\n current_instance.start()\n elapsed = time.perf_counter() - start\n logger.info(f\"Checking recursive and indicator-only lookahead bias of indicators \"\n f\"of {Path(strategy_obj['location']).name} \"\n f\"took {elapsed:.0f} seconds.\")\n return current_instance\n\n @staticmethod\n def start(config: Config):\n config = RecursiveAnalysisSubFunctions.calculate_config_overrides(config)\n\n strategy_objs = StrategyResolver.search_all_objects(\n config, enum_failed=False, recursive=config.get('recursive_strategy_search', False))\n\n RecursiveAnalysis_instances = []\n\n # unify --strategy and --strategy-list to one list\n if not (strategy_list := config.get('strategy_list', [])):\n if config.get('strategy') is None:\n raise OperationalException(\n \"No Strategy specified. Please specify a strategy via --strategy\"\n )\n strategy_list = [config['strategy']]\n\n # check if strategies can be properly loaded, only check them if they can be.\n for strat in strategy_list:\n for strategy_obj in strategy_objs:\n if strategy_obj['name'] == strat and strategy_obj not in strategy_list:\n RecursiveAnalysis_instances.append(\n RecursiveAnalysisSubFunctions.initialize_single_recursive_analysis(\n config, strategy_obj))\n break\n\n # report the results\n if RecursiveAnalysis_instances:\n RecursiveAnalysisSubFunctions.text_table_recursive_analysis_instances(\n RecursiveAnalysis_instances)\n else:\n logger.error(\"There was no strategy specified through --strategy \"\n \"or timeframe was not specified.\")"
},
{
"identifier": "get_args",
"path": "tests/conftest.py",
"snippet": "def get_args(args):\n return Arguments(args).get_parsed_arg()"
},
{
"identifier": "log_has_re",
"path": "tests/conftest.py",
"snippet": "def log_has_re(line, logs):\n \"\"\"Check if line matches some caplog's message.\"\"\"\n return any(re.match(line, message) for message in logs.messages)"
},
{
"identifier": "patch_exchange",
"path": "tests/conftest.py",
"snippet": "def patch_exchange(\n mocker,\n api_mock=None,\n id='binance',\n mock_markets=True,\n mock_supported_modes=True\n) -> None:\n mocker.patch(f'{EXMS}._load_async_markets', return_value={})\n mocker.patch(f'{EXMS}.validate_config', MagicMock())\n mocker.patch(f'{EXMS}.validate_timeframes', MagicMock())\n mocker.patch(f'{EXMS}.id', PropertyMock(return_value=id))\n mocker.patch(f'{EXMS}.name', PropertyMock(return_value=id.title()))\n mocker.patch(f'{EXMS}.precisionMode', PropertyMock(return_value=2))\n\n if mock_markets:\n if isinstance(mock_markets, bool):\n mock_markets = get_markets()\n mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=mock_markets))\n\n if mock_supported_modes:\n mocker.patch(\n f'freqtrade.exchange.{id}.{id.capitalize()}._supported_trading_mode_margin_pairs',\n PropertyMock(return_value=[\n (TradingMode.MARGIN, MarginMode.CROSS),\n (TradingMode.MARGIN, MarginMode.ISOLATED),\n (TradingMode.FUTURES, MarginMode.CROSS),\n (TradingMode.FUTURES, MarginMode.ISOLATED)\n ])\n )\n\n if api_mock:\n mocker.patch(f'{EXMS}._init_ccxt', return_value=api_mock)\n else:\n mocker.patch(f'{EXMS}._init_ccxt', MagicMock())\n mocker.patch(f'{EXMS}.timeframes', PropertyMock(\n return_value=['5m', '15m', '1h', '1d']))"
}
] | from copy import deepcopy
from pathlib import Path
from unittest.mock import MagicMock, PropertyMock
from freqtrade.commands.optimize_commands import start_recursive_analysis
from freqtrade.data.history import get_timerange
from freqtrade.exceptions import OperationalException
from freqtrade.optimize.analysis.recursive import RecursiveAnalysis
from freqtrade.optimize.analysis.recursive_helpers import RecursiveAnalysisSubFunctions
from tests.conftest import get_args, log_has_re, patch_exchange
import pytest | 3,731 | # pragma pylint: disable=missing-docstring, W0212, line-too-long, C0103, unused-argument
@pytest.fixture
def recursive_conf(default_conf_usdt):
default_conf_usdt['timerange'] = '20220101-20220501'
default_conf_usdt['strategy_path'] = str(
Path(__file__).parent.parent / "strategy/strats")
default_conf_usdt['strategy'] = 'strategy_test_v3_recursive_issue'
default_conf_usdt['pairs'] = ['UNITTEST/USDT']
default_conf_usdt['startup_candle'] = [100]
return default_conf_usdt
def test_start_recursive_analysis(mocker):
single_mock = MagicMock()
text_table_mock = MagicMock()
mocker.patch.multiple(
'freqtrade.optimize.analysis.recursive_helpers.RecursiveAnalysisSubFunctions',
initialize_single_recursive_analysis=single_mock,
text_table_recursive_analysis_instances=text_table_mock,
)
args = [
"recursive-analysis",
"--strategy",
"strategy_test_v3_recursive_issue",
"--strategy-path",
str(Path(__file__).parent.parent / "strategy/strats"),
"--pairs",
"UNITTEST/BTC",
"--timerange",
"20220101-20220201"
]
pargs = get_args(args)
pargs['config'] = None
start_recursive_analysis(pargs)
assert single_mock.call_count == 1
assert text_table_mock.call_count == 1
single_mock.reset_mock()
# Missing timerange
args = [
"recursive-analysis",
"--strategy",
"strategy_test_v3_with_recursive_bias",
"--strategy-path",
str(Path(__file__).parent.parent / "strategy/strats"),
"--pairs",
"UNITTEST/BTC"
]
pargs = get_args(args)
pargs['config'] = None
| # pragma pylint: disable=missing-docstring, W0212, line-too-long, C0103, unused-argument
@pytest.fixture
def recursive_conf(default_conf_usdt):
default_conf_usdt['timerange'] = '20220101-20220501'
default_conf_usdt['strategy_path'] = str(
Path(__file__).parent.parent / "strategy/strats")
default_conf_usdt['strategy'] = 'strategy_test_v3_recursive_issue'
default_conf_usdt['pairs'] = ['UNITTEST/USDT']
default_conf_usdt['startup_candle'] = [100]
return default_conf_usdt
def test_start_recursive_analysis(mocker):
single_mock = MagicMock()
text_table_mock = MagicMock()
mocker.patch.multiple(
'freqtrade.optimize.analysis.recursive_helpers.RecursiveAnalysisSubFunctions',
initialize_single_recursive_analysis=single_mock,
text_table_recursive_analysis_instances=text_table_mock,
)
args = [
"recursive-analysis",
"--strategy",
"strategy_test_v3_recursive_issue",
"--strategy-path",
str(Path(__file__).parent.parent / "strategy/strats"),
"--pairs",
"UNITTEST/BTC",
"--timerange",
"20220101-20220201"
]
pargs = get_args(args)
pargs['config'] = None
start_recursive_analysis(pargs)
assert single_mock.call_count == 1
assert text_table_mock.call_count == 1
single_mock.reset_mock()
# Missing timerange
args = [
"recursive-analysis",
"--strategy",
"strategy_test_v3_with_recursive_bias",
"--strategy-path",
str(Path(__file__).parent.parent / "strategy/strats"),
"--pairs",
"UNITTEST/BTC"
]
pargs = get_args(args)
pargs['config'] = None | with pytest.raises(OperationalException, | 2 | 2023-10-21 10:02:05+00:00 | 8k |
yanzhh/HGERE | transformers/src/transformers/modeling_utils.py | [
{
"identifier": "get_activation",
"path": "transformers/src/transformers/activations.py",
"snippet": "def get_activation(activation_string):\n if activation_string in ACT2FN:\n return ACT2FN[activation_string]\n else:\n raise KeyError(\n \"function {} not found in ACT2FN mapping {} or torch.nn.functional\".format(\n activation_string, list(ACT2FN.keys())\n )\n )"
},
{
"identifier": "PretrainedConfig",
"path": "transformers/src/transformers/configuration_utils.py",
"snippet": "class PretrainedConfig(object):\n r\"\"\" Base class for all configuration classes.\n Handles a few parameters common to all models' configurations as well as methods for loading/downloading/saving configurations.\n\n Note:\n A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to initialize a model does **not** load the model weights.\n It only affects the model's configuration.\n\n Class attributes (overridden by derived classes):\n - ``pretrained_config_archive_map``: a python ``dict`` with `shortcut names` (string) as keys and `url` (string) of associated pretrained model configurations as values.\n - ``model_type``: a string that identifies the model type, that we serialize into the JSON file, and that we use to recreate the correct object in :class:`~transformers.AutoConfig`.\n\n Args:\n finetuning_task (:obj:`string` or :obj:`None`, `optional`, defaults to :obj:`None`):\n Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow or PyTorch) checkpoint.\n num_labels (:obj:`int`, `optional`, defaults to `2`):\n Number of classes to use when the model is a classification model (sequences/tokens)\n output_attentions (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Should the model returns attentions weights.\n output_hidden_states (:obj:`string`, `optional`, defaults to :obj:`False`):\n Should the model returns all hidden-states.\n torchscript (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Is the model used with Torchscript (for PyTorch models).\n \"\"\"\n pretrained_config_archive_map = {} # type: Dict[str, str]\n model_type = \"\" # type: str\n\n def __init__(self, **kwargs):\n # Attributes with defaults\n self.output_attentions = kwargs.pop(\"output_attentions\", False)\n self.output_hidden_states = kwargs.pop(\"output_hidden_states\", False)\n self.output_past = kwargs.pop(\"output_past\", True) # Not used by all models\n self.torchscript = kwargs.pop(\"torchscript\", False) # Only used by PyTorch models\n self.use_bfloat16 = kwargs.pop(\"use_bfloat16\", False)\n self.pruned_heads = kwargs.pop(\"pruned_heads\", {})\n\n # Is decoder is used in encoder-decoder models to differentiate encoder from decoder\n self.is_decoder = kwargs.pop(\"is_decoder\", False)\n\n # Parameters for sequence generation\n self.max_length = kwargs.pop(\"max_length\", 20)\n self.do_sample = kwargs.pop(\"do_sample\", False)\n self.num_beams = kwargs.pop(\"num_beams\", 1)\n self.temperature = kwargs.pop(\"temperature\", 1.0)\n self.top_k = kwargs.pop(\"top_k\", 50)\n self.top_p = kwargs.pop(\"top_p\", 1.0)\n self.repetition_penalty = kwargs.pop(\"repetition_penalty\", 1.0)\n self.bos_token_id = kwargs.pop(\"bos_token_id\", None)\n self.pad_token_id = kwargs.pop(\"pad_token_id\", None)\n self.eos_token_ids = kwargs.pop(\"eos_token_ids\", None)\n self.length_penalty = kwargs.pop(\"length_penalty\", 1.0)\n self.num_return_sequences = kwargs.pop(\"num_return_sequences\", 1)\n\n # Fine-tuning task arguments\n self.architectures = kwargs.pop(\"architectures\", None)\n self.finetuning_task = kwargs.pop(\"finetuning_task\", None)\n self.num_labels = kwargs.pop(\"num_labels\", 2)\n self.id2label = kwargs.pop(\"id2label\", {i: \"LABEL_{}\".format(i) for i in range(self.num_labels)})\n self.id2label = dict((int(key), value) for key, value in self.id2label.items())\n self.label2id = kwargs.pop(\"label2id\", dict(zip(self.id2label.values(), self.id2label.keys())))\n self.label2id = dict((key, int(value)) for key, value in self.label2id.items())\n\n # Additional attributes without default values\n for key, value in kwargs.items():\n try:\n setattr(self, key, value)\n except AttributeError as err:\n logger.error(\"Can't set {} with value {} for {}\".format(key, value, self))\n raise err\n\n def save_pretrained(self, save_directory):\n \"\"\"\n Save a configuration object to the directory `save_directory`, so that it\n can be re-loaded using the :func:`~transformers.PretrainedConfig.from_pretrained` class method.\n\n Args:\n save_directory (:obj:`string`):\n Directory where the configuration JSON file will be saved.\n \"\"\"\n assert os.path.isdir(\n save_directory\n ), \"Saving path should be a directory where the model and configuration can be saved\"\n\n # If we save using the predefined names, we can load using `from_pretrained`\n output_config_file = os.path.join(save_directory, CONFIG_NAME)\n\n self.to_json_file(output_config_file)\n logger.info(\"Configuration saved in {}\".format(output_config_file))\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> \"PretrainedConfig\":\n r\"\"\"\n\n Instantiate a :class:`~transformers.PretrainedConfig` (or a derived class) from a pre-trained model configuration.\n\n Args:\n pretrained_model_name_or_path (:obj:`string`):\n either:\n - a string with the `shortcut name` of a pre-trained model configuration to load from cache or\n download, e.g.: ``bert-base-uncased``.\n - a string with the `identifier name` of a pre-trained model configuration that was user-uploaded to\n our S3, e.g.: ``dbmdz/bert-base-german-cased``.\n - a path to a `directory` containing a configuration file saved using the\n :func:`~transformers.PretrainedConfig.save_pretrained` method, e.g.: ``./my_model_directory/``.\n - a path or url to a saved configuration JSON `file`, e.g.:\n ``./my_model_directory/configuration.json``.\n cache_dir (:obj:`string`, `optional`):\n Path to a directory in which a downloaded pre-trained model\n configuration should be cached if the standard cache should not be used.\n kwargs (:obj:`Dict[str, any]`, `optional`):\n The values in kwargs of any keys which are configuration attributes will be used to override the loaded\n values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is\n controlled by the `return_unused_kwargs` keyword parameter.\n force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Force to (re-)download the model weights and configuration files and override the cached versions if they exist.\n resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Do not delete incompletely recieved file. Attempt to resume the download if such a file exists.\n proxies (:obj:`Dict`, `optional`):\n A dictionary of proxy servers to use by protocol or endpoint, e.g.:\n :obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.`\n The proxies are used on each request.\n return_unused_kwargs: (`optional`) bool:\n If False, then this function returns just the final configuration object.\n If True, then this functions returns a :obj:`Tuple(config, unused_kwargs)` where `unused_kwargs` is a\n dictionary consisting of the key/value pairs whose keys are not configuration attributes: ie the part\n of kwargs which has not been used to update `config` and is otherwise ignored.\n\n Returns:\n :class:`PretrainedConfig`: An instance of a configuration object\n\n Examples::\n\n # We can't instantiate directly the base class `PretrainedConfig` so let's show the examples on a\n # derived class: BertConfig\n config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.\n config = BertConfig.from_pretrained('./test/saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`\n config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json')\n config = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False)\n assert config.output_attention == True\n config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attention=True,\n foo=False, return_unused_kwargs=True)\n assert config.output_attention == True\n assert unused_kwargs == {'foo': False}\n\n \"\"\"\n config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)\n return cls.from_dict(config_dict, **kwargs)\n\n @classmethod\n def get_config_dict(\n cls, pretrained_model_name_or_path: str, pretrained_config_archive_map: Optional[Dict] = None, **kwargs\n ) -> Tuple[Dict, Dict]:\n \"\"\"\n From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used\n for instantiating a Config using `from_dict`.\n\n Parameters:\n pretrained_model_name_or_path (:obj:`string`):\n The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.\n pretrained_config_archive_map: (:obj:`Dict[str, str]`, `optional`) Dict:\n A map of `shortcut names` to `url`. By default, will use the current class attribute.\n\n Returns:\n :obj:`Tuple[Dict, Dict]`: The dictionary that will be used to instantiate the configuration object.\n\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", None)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n\n if pretrained_config_archive_map is None:\n pretrained_config_archive_map = cls.pretrained_config_archive_map\n\n if pretrained_model_name_or_path in pretrained_config_archive_map:\n config_file = pretrained_config_archive_map[pretrained_model_name_or_path]\n elif os.path.isdir(pretrained_model_name_or_path):\n config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)\n elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):\n config_file = pretrained_model_name_or_path\n else:\n config_file = hf_bucket_url(pretrained_model_name_or_path, postfix=CONFIG_NAME)\n\n try:\n # Load from URL or cache if already cached\n resolved_config_file = cached_path(\n config_file,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n )\n # Load config dict\n if resolved_config_file is None:\n raise EnvironmentError\n config_dict = cls._dict_from_json_file(resolved_config_file)\n\n except EnvironmentError:\n if pretrained_model_name_or_path in pretrained_config_archive_map:\n msg = \"Couldn't reach server at '{}' to download pretrained model configuration file.\".format(\n config_file\n )\n else:\n msg = (\n \"Model name '{}' was not found in model name list. \"\n \"We assumed '{}' was a path, a model identifier, or url to a configuration file named {} or \"\n \"a directory containing such a file but couldn't find any such file at this path or url.\".format(\n pretrained_model_name_or_path, config_file, CONFIG_NAME,\n )\n )\n raise EnvironmentError(msg)\n\n except json.JSONDecodeError:\n msg = (\n \"Couldn't reach server at '{}' to download configuration file or \"\n \"configuration file is not a valid JSON file. \"\n \"Please check network or file content here: {}.\".format(config_file, resolved_config_file)\n )\n raise EnvironmentError(msg)\n\n if resolved_config_file == config_file:\n logger.info(\"loading configuration file {}\".format(config_file))\n else:\n logger.info(\"loading configuration file {} from cache at {}\".format(config_file, resolved_config_file))\n\n return config_dict, kwargs\n\n @classmethod\n def from_dict(cls, config_dict: Dict, **kwargs) -> \"PretrainedConfig\":\n \"\"\"\n Constructs a `Config` from a Python dictionary of parameters.\n\n Args:\n config_dict (:obj:`Dict[str, any]`):\n Dictionary that will be used to instantiate the configuration object. Such a dictionary can be retrieved\n from a pre-trained checkpoint by leveraging the :func:`~transformers.PretrainedConfig.get_config_dict`\n method.\n kwargs (:obj:`Dict[str, any]`):\n Additional parameters from which to initialize the configuration object.\n\n Returns:\n :class:`PretrainedConfig`: An instance of a configuration object\n \"\"\"\n return_unused_kwargs = kwargs.pop(\"return_unused_kwargs\", False)\n\n config = cls(**config_dict)\n\n if hasattr(config, \"pruned_heads\"):\n config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items())\n\n # Update config with kwargs if needed\n to_remove = []\n for key, value in kwargs.items():\n if hasattr(config, key):\n setattr(config, key, value)\n to_remove.append(key)\n for key in to_remove:\n kwargs.pop(key, None)\n\n logger.info(\"Model config %s\", str(config))\n if return_unused_kwargs:\n return config, kwargs\n else:\n return config\n\n @classmethod\n def from_json_file(cls, json_file: str) -> \"PretrainedConfig\":\n \"\"\"\n Constructs a `Config` from the path to a json file of parameters.\n\n Args:\n json_file (:obj:`string`):\n Path to the JSON file containing the parameters.\n\n Returns:\n :class:`PretrainedConfig`: An instance of a configuration object\n\n \"\"\"\n config_dict = cls._dict_from_json_file(json_file)\n return cls(**config_dict)\n\n @classmethod\n def _dict_from_json_file(cls, json_file: str):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return json.loads(text)\n\n def __eq__(self, other):\n return self.__dict__ == other.__dict__\n\n def __repr__(self):\n return \"{} {}\".format(self.__class__.__name__, self.to_json_string())\n\n def to_dict(self):\n \"\"\"\n Serializes this instance to a Python dictionary.\n\n Returns:\n :obj:`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,\n \"\"\"\n output = copy.deepcopy(self.__dict__)\n if hasattr(self.__class__, \"model_type\"):\n output[\"model_type\"] = self.__class__.model_type\n return output\n\n def to_json_string(self):\n \"\"\"\n Serializes this instance to a JSON string.\n\n Returns:\n :obj:`string`: String containing all the attributes that make up this configuration instance in JSON format.\n \"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path):\n \"\"\"\n Save this instance to a json file.\n\n Args:\n json_file_path (:obj:`string`):\n Path to the JSON file in which this configuration instance's parameters will be saved.\n \"\"\"\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string())"
},
{
"identifier": "DUMMY_INPUTS",
"path": "transformers/src/transformers/file_utils.py",
"snippet": "DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]"
},
{
"identifier": "TF2_WEIGHTS_NAME",
"path": "transformers/src/transformers/file_utils.py",
"snippet": "TF2_WEIGHTS_NAME = \"tf_model.h5\""
},
{
"identifier": "TF_WEIGHTS_NAME",
"path": "transformers/src/transformers/file_utils.py",
"snippet": "TF_WEIGHTS_NAME = \"model.ckpt\""
},
{
"identifier": "WEIGHTS_NAME",
"path": "transformers/src/transformers/file_utils.py",
"snippet": "WEIGHTS_NAME = \"pytorch_model.bin\""
},
{
"identifier": "cached_path",
"path": "transformers/src/transformers/file_utils.py",
"snippet": "def cached_path(\n url_or_filename,\n cache_dir=None,\n force_download=False,\n proxies=None,\n resume_download=False,\n user_agent=None,\n extract_compressed_file=False,\n force_extract=False,\n local_files_only=False,\n) -> Optional[str]:\n \"\"\"\n Given something that might be a URL (or might be a local path),\n determine which. If it's a URL, download the file and cache it, and\n return the path to the cached file. If it's already a local path,\n make sure the file exists and then return the path.\n Args:\n cache_dir: specify a cache directory to save the file to (overwrite the default cache dir).\n force_download: if True, re-dowload the file even if it's already cached in the cache dir.\n resume_download: if True, resume the download if incompletly recieved file is found.\n user_agent: Optional string or dict that will be appended to the user-agent on remote requests.\n extract_compressed_file: if True and the path point to a zip or tar file, extract the compressed\n file in a folder along the archive.\n force_extract: if True when extract_compressed_file is True and the archive was already extracted,\n re-extract the archive and overide the folder where it was extracted.\n\n Return:\n None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk).\n Local path (string) otherwise\n \"\"\"\n if cache_dir is None:\n cache_dir = TRANSFORMERS_CACHE\n if isinstance(url_or_filename, Path):\n url_or_filename = str(url_or_filename)\n if isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n if is_remote_url(url_or_filename):\n # URL, so get it from the cache (downloading if necessary)\n output_path = get_from_cache(\n url_or_filename,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n user_agent=user_agent,\n local_files_only=local_files_only,\n )\n elif os.path.exists(url_or_filename):\n # File, and it exists.\n output_path = url_or_filename\n elif urlparse(url_or_filename).scheme == \"\":\n # File, but it doesn't exist.\n raise EnvironmentError(\"file {} not found\".format(url_or_filename))\n else:\n # Something unknown\n raise ValueError(\"unable to parse {} as a URL or as a local path\".format(url_or_filename))\n\n if extract_compressed_file:\n if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):\n return output_path\n\n # Path where we extract compressed archives\n # We avoid '.' in dir name and add \"-extracted\" at the end: \"./model.zip\" => \"./model-zip-extracted/\"\n output_dir, output_file = os.path.split(output_path)\n output_extract_dir_name = output_file.replace(\".\", \"-\") + \"-extracted\"\n output_path_extracted = os.path.join(output_dir, output_extract_dir_name)\n\n if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:\n return output_path_extracted\n\n # Prevent parallel extractions\n lock_path = output_path + \".lock\"\n with FileLock(lock_path):\n shutil.rmtree(output_path_extracted, ignore_errors=True)\n os.makedirs(output_path_extracted)\n if is_zipfile(output_path):\n with ZipFile(output_path, \"r\") as zip_file:\n zip_file.extractall(output_path_extracted)\n zip_file.close()\n elif tarfile.is_tarfile(output_path):\n tar_file = tarfile.open(output_path)\n tar_file.extractall(output_path_extracted)\n tar_file.close()\n else:\n raise EnvironmentError(\"Archive format of {} could not be identified\".format(output_path))\n\n return output_path_extracted\n\n return output_path"
},
{
"identifier": "hf_bucket_url",
"path": "transformers/src/transformers/file_utils.py",
"snippet": "def hf_bucket_url(identifier, postfix=None, cdn=False) -> str:\n endpoint = CLOUDFRONT_DISTRIB_PREFIX if cdn else S3_BUCKET_PREFIX\n if postfix is None:\n return \"/\".join((endpoint, identifier))\n else:\n return \"/\".join((endpoint, identifier, postfix))"
},
{
"identifier": "is_remote_url",
"path": "transformers/src/transformers/file_utils.py",
"snippet": "def is_remote_url(url_or_filename):\n parsed = urlparse(url_or_filename)\n return parsed.scheme in (\"http\", \"https\", \"s3\")"
}
] | import logging
import os
import typing
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from .activations import get_activation
from .configuration_utils import PretrainedConfig
from .file_utils import (
DUMMY_INPUTS,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
WEIGHTS_NAME,
cached_path,
hf_bucket_url,
is_remote_url,
)
from torch.nn import Identity
from transformers import load_tf2_checkpoint_in_pytorch_model | 5,881 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
logger = logging.getLogger(__name__)
try:
except ImportError:
# Older PyTorch compatibility
class Identity(nn.Module):
r"""A placeholder identity operator that is argument-insensitive.
"""
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, input):
return input
class ModuleUtilsMixin:
"""
A few utilities for torch.nn.Modules, to be used as a mixin.
"""
def num_parameters(self, only_trainable: bool = False) -> int:
"""
Get number of (optionally, trainable) parameters in the module.
"""
params = filter(lambda x: x.requires_grad, self.parameters()) if only_trainable else self.parameters()
return sum(p.numel() for p in params)
class PreTrainedModel(nn.Module, ModuleUtilsMixin):
r""" Base class for all models.
:class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods for loading/downloading/saving models
as well as a few methods common to all models to (i) resize the input embeddings and (ii) prune heads in the self-attention heads.
Class attributes (overridden by derived classes):
- ``config_class``: a class derived from :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
- ``pretrained_model_archive_map``: a python ``dict`` of with `short-cut-names` (string) as keys and `url` (string) of associated pretrained weights as values.
- ``load_tf_weights``: a python ``method`` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments:
- ``model``: an instance of the relevant subclass of :class:`~transformers.PreTrainedModel`,
- ``config``: an instance of the relevant subclass of :class:`~transformers.PretrainedConfig`,
- ``path``: a path (string) to the TensorFlow checkpoint.
- ``base_model_prefix``: a string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.
"""
config_class = None
pretrained_model_archive_map = {}
base_model_prefix = ""
@property
def dummy_inputs(self):
""" Dummy inputs to do a forward pass in the network.
Returns:
torch.Tensor with dummy inputs
"""
return {"input_ids": torch.tensor(DUMMY_INPUTS)}
def __init__(self, config, *inputs, **kwargs):
super().__init__()
| # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
logger = logging.getLogger(__name__)
try:
except ImportError:
# Older PyTorch compatibility
class Identity(nn.Module):
r"""A placeholder identity operator that is argument-insensitive.
"""
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, input):
return input
class ModuleUtilsMixin:
"""
A few utilities for torch.nn.Modules, to be used as a mixin.
"""
def num_parameters(self, only_trainable: bool = False) -> int:
"""
Get number of (optionally, trainable) parameters in the module.
"""
params = filter(lambda x: x.requires_grad, self.parameters()) if only_trainable else self.parameters()
return sum(p.numel() for p in params)
class PreTrainedModel(nn.Module, ModuleUtilsMixin):
r""" Base class for all models.
:class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods for loading/downloading/saving models
as well as a few methods common to all models to (i) resize the input embeddings and (ii) prune heads in the self-attention heads.
Class attributes (overridden by derived classes):
- ``config_class``: a class derived from :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
- ``pretrained_model_archive_map``: a python ``dict`` of with `short-cut-names` (string) as keys and `url` (string) of associated pretrained weights as values.
- ``load_tf_weights``: a python ``method`` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments:
- ``model``: an instance of the relevant subclass of :class:`~transformers.PreTrainedModel`,
- ``config``: an instance of the relevant subclass of :class:`~transformers.PretrainedConfig`,
- ``path``: a path (string) to the TensorFlow checkpoint.
- ``base_model_prefix``: a string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.
"""
config_class = None
pretrained_model_archive_map = {}
base_model_prefix = ""
@property
def dummy_inputs(self):
""" Dummy inputs to do a forward pass in the network.
Returns:
torch.Tensor with dummy inputs
"""
return {"input_ids": torch.tensor(DUMMY_INPUTS)}
def __init__(self, config, *inputs, **kwargs):
super().__init__() | if not isinstance(config, PretrainedConfig): | 1 | 2023-10-15 02:31:09+00:00 | 8k |
johnyang101/pmpnndiff | data/datamodules.py | [
{
"identifier": "StructureDatasetJSONL",
"path": "data/utils.py",
"snippet": "class StructureDatasetJSONL():\n def __init__(self, jsonl_file, verbose=True, truncate=None, max_length=100,\n alphabet='ACDEFGHIKLMNPQRSTVWYX-', \n esm=False, esm_cfg=None,\n ):\n alphabet_set = set([a for a in alphabet])\n discard_count = {\n 'bad_chars': 0,\n 'too_long': 0,\n 'bad_seq_length': 0\n }\n \n \n with open(jsonl_file) as f:\n self.data = []\n\n lines = f.readlines()\n start = time.time()\n \n for i, line in enumerate(lines):\n entry = json.loads(line)\n seq = entry['seq'].replace('-', 'X')\n name = entry['name']\n\n # Convert raw coords to np arrays\n #for key, val in entry['coords'].items():\n # entry['coords'][key] = np.asarray(val)\n \n if esm: #Loads precomputed ESM2 embeddings.\n assert esm_cfg is not None, 'esm_cfg must be provided if esm=True'\n entry['esm'] = get_esm_embedding(name, esm_cfg, len(seq))\n\n # Check if in alphabet\n bad_chars = set([s for s in seq]).difference(alphabet_set)\n if len(bad_chars) == 0:\n if len(entry['seq']) <= max_length:\n if True:\n self.data.append(entry)\n else:\n discard_count['bad_seq_length'] += 1\n else:\n discard_count['too_long'] += 1\n else:\n print(name, bad_chars, entry['seq'])\n discard_count['bad_chars'] += 1\n\n # Truncate early\n if truncate is not None and len(self.data) == truncate:\n return\n\n if verbose and (i + 1) % 1000 == 0:\n elapsed = time.time() - start\n print('{} entries ({} loaded) in {:.1f} s'.format(len(self.data), i+1, elapsed))\n\n print('discarded', discard_count)\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n return self.data[idx]"
},
{
"identifier": "StructureLoader",
"path": "data/utils.py",
"snippet": "class StructureLoader():\n def __init__(self, dataset, batch_size=100, shuffle=True,\n collate_fn=lambda x:x, drop_last=False):\n self.dataset = dataset\n self.size = len(dataset)\n self.lengths = [len(dataset[i]['seq']) for i in range(self.size)]\n self.batch_size = batch_size\n sorted_ix = np.argsort(self.lengths)\n\n # Cluster into batches of similar sizes\n clusters, batch = [], []\n batch_max = 0\n for ix in sorted_ix:\n size = self.lengths[ix]\n if size * (len(batch) + 1) <= self.batch_size:\n batch.append(ix)\n batch_max = size\n else:\n clusters.append(batch)\n batch, batch_max = [], 0\n if len(batch) > 0:\n clusters.append(batch)\n self.clusters = clusters\n\n def __len__(self):\n return len(self.clusters)\n\n def __iter__(self):\n np.random.shuffle(self.clusters)\n for b_idx in self.clusters:\n batch = [self.dataset[i] for i in b_idx]\n yield batch"
},
{
"identifier": "StructureDatasetPDB",
"path": "data/utils.py",
"snippet": "class StructureDatasetPDB():\n def __init__(self, pdb_dict_list, verbose=True, truncate=None, max_length=100,\n alphabet='ACDEFGHIKLMNPQRSTVWYX'):\n alphabet_set = set([a for a in alphabet])\n discard_count = {\n 'bad_chars': 0,\n 'too_long': 0,\n 'bad_seq_length': 0\n }\n\n self.data = []\n\n start = time.time()\n for i, entry in enumerate(pdb_dict_list):\n seq = entry['seq'].replace('-', 'X')\n name = entry['name']\n\n bad_chars = set([s for s in seq]).difference(alphabet_set)\n if len(bad_chars) == 0:\n if len(entry['seq']) <= max_length:\n self.data.append(entry)\n else:\n discard_count['too_long'] += 1\n else:\n #print(name, bad_chars, entry['seq'])\n discard_count['bad_chars'] += 1\n\n # Truncate early\n if truncate is not None and len(self.data) == truncate:\n return\n\n if verbose and (i + 1) % 1000 == 0:\n elapsed = time.time() - start\n #print('{} entries ({} loaded) in {:.1f} s'.format(len(self.data), i+1, elapsed))\n\n #print('Discarded', discard_count)\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n return self.data[idx]"
},
{
"identifier": "PDB_dataset",
"path": "data/utils.py",
"snippet": "class PDB_dataset(torch.utils.data.Dataset):\n def __init__(self, IDs, loader, train_dict, params):\n self.IDs = IDs\n self.train_dict = train_dict\n self.loader = loader\n self.params = params\n\n def __len__(self):\n return len(self.IDs)\n\n def __getitem__(self, index):\n ID = self.IDs[index]\n sel_idx = np.random.randint(0, len(self.train_dict[ID]))\n out = self.loader(self.train_dict[ID][sel_idx], self.params)\n return out"
},
{
"identifier": "loader_pdb",
"path": "data/utils.py",
"snippet": "def loader_pdb(item, params):\n\n pdbid,chid = item[0].split('_')\n PREFIX = \"%s/pdb/%s/%s\"%(params['DIR'],pdbid[1:3],pdbid)\n \n # load metadata\n if not os.path.isfile(PREFIX+\".pt\"):\n return {'seq': np.zeros(5)}\n meta = torch.load(PREFIX+\".pt\")\n asmb_ids = meta['asmb_ids']\n asmb_chains = meta['asmb_chains']\n chids = np.array(meta['chains'])\n\n # find candidate assemblies which contain chid chain\n asmb_candidates = set([a for a,b in zip(asmb_ids,asmb_chains)\n if chid in b.split(',')])\n\n # if the chains is missing is missing from all the assemblies\n # then return this chain alone\n if len(asmb_candidates)<1:\n chain = torch.load(\"%s_%s.pt\"%(PREFIX,chid))\n L = len(chain['seq'])\n return {'seq' : chain['seq'],\n 'xyz' : chain['xyz'],\n 'idx' : torch.zeros(L).int(),\n 'masked' : torch.Tensor([0]).int(),\n 'label' : item[0]}\n\n # randomly pick one assembly from candidates\n asmb_i = random.sample(list(asmb_candidates), 1)\n\n # indices of selected transforms\n idx = np.where(np.array(asmb_ids)==asmb_i)[0]\n\n # load relevant chains\n chains = {c:torch.load(\"%s_%s.pt\"%(PREFIX,c))\n for i in idx for c in asmb_chains[i]\n if c in meta['chains']}\n\n # generate assembly\n asmb = {}\n for k in idx:\n\n # pick k-th xform\n xform = meta['asmb_xform%d'%k]\n u = xform[:,:3,:3]\n r = xform[:,:3,3]\n\n # select chains which k-th xform should be applied to\n s1 = set(meta['chains'])\n s2 = set(asmb_chains[k].split(','))\n chains_k = s1&s2\n\n # transform selected chains \n for c in chains_k:\n try:\n xyz = chains[c]['xyz']\n xyz_ru = torch.einsum('bij,raj->brai', u, xyz) + r[:,None,None,:]\n asmb.update({(c,k,i):xyz_i for i,xyz_i in enumerate(xyz_ru)})\n except KeyError:\n return {'seq': np.zeros(5)}\n\n # select chains which share considerable similarity to chid\n seqid = meta['tm'][chids==chid][0,:,1]\n homo = set([ch_j for seqid_j,ch_j in zip(seqid,chids)\n if seqid_j>params['HOMO']])\n # stack all chains in the assembly together\n seq,xyz,idx,masked = \"\",[],[],[]\n seq_list = []\n for counter,(k,v) in enumerate(asmb.items()):\n seq += chains[k[0]]['seq']\n seq_list.append(chains[k[0]]['seq'])\n xyz.append(v)\n idx.append(torch.full((v.shape[0],),counter))\n if k[0] in homo:\n masked.append(counter)\n\n return {'seq' : seq,\n 'xyz' : torch.cat(xyz,dim=0),\n 'idx' : torch.cat(idx,dim=0),\n 'masked' : torch.Tensor(masked).int(),\n 'label' : item[0]}"
},
{
"identifier": "build_training_clusters",
"path": "data/utils.py",
"snippet": "def build_training_clusters(params, debug):\n val_ids = set([int(l) for l in open(params['VAL']).readlines()])\n test_ids = set([int(l) for l in open(params['TEST']).readlines()])\n \n if debug:\n val_ids = []\n test_ids = []\n \n # read & clean list.csv\n with open(params['LIST'], 'r') as f:\n reader = csv.reader(f)\n next(reader)\n rows = [[r[0],r[3],int(r[4])] for r in reader\n if float(r[2])<=params['RESCUT'] and\n parser.parse(r[1])<=parser.parse(params['DATCUT'])]\n \n # compile training and validation sets\n train = {}\n valid = {}\n test = {}\n\n if debug:\n rows = rows[:20]\n for r in rows:\n if r[2] in val_ids:\n if r[2] in valid.keys():\n valid[r[2]].append(r[:2])\n else:\n valid[r[2]] = [r[:2]]\n elif r[2] in test_ids:\n if r[2] in test.keys():\n test[r[2]].append(r[:2])\n else:\n test[r[2]] = [r[:2]]\n else:\n if r[2] in train.keys():\n train[r[2]].append(r[:2])\n else:\n train[r[2]] = [r[:2]]\n if debug:\n valid=train \n return train, valid, test"
},
{
"identifier": "worker_init_fn",
"path": "data/utils.py",
"snippet": "def worker_init_fn(worker_id):\n np.random.seed()"
},
{
"identifier": "get_pdbs",
"path": "data/utils.py",
"snippet": "def get_pdbs(data_loader, repeat=1, max_length=10000, num_units=1000000):\n init_alphabet = ['A', 'B', 'C', 'D', 'E', 'F', 'G','H', 'I', 'J','K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T','U', 'V','W','X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g','h', 'i', 'j','k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't','u', 'v','w','x', 'y', 'z']\n extra_alphabet = [str(item) for item in list(np.arange(300))]\n chain_alphabet = init_alphabet + extra_alphabet\n c = 0\n c1 = 0\n pdb_dict_list = []\n t0 = time.time()\n for _ in range(repeat):\n for step,t in enumerate(data_loader): #Can be parallelized.\n t = {k:v[0] for k,v in t.items()}\n c1 += 1\n if 'label' in list(t):\n my_dict = {}\n s = 0\n concat_seq = ''\n concat_N = []\n concat_CA = []\n concat_C = []\n concat_O = []\n concat_mask = []\n coords_dict = {}\n mask_list = []\n visible_list = []\n if len(list(np.unique(t['idx']))) < 352:\n for idx in list(np.unique(t['idx'])):\n letter = chain_alphabet[idx]\n res = np.argwhere(t['idx']==idx)\n initial_sequence= \"\".join(list(np.array(list(t['seq']))[res][0,]))\n if initial_sequence[-6:] == \"HHHHHH\":\n res = res[:,:-6]\n if initial_sequence[0:6] == \"HHHHHH\":\n res = res[:,6:]\n if initial_sequence[-7:-1] == \"HHHHHH\":\n res = res[:,:-7]\n if initial_sequence[-8:-2] == \"HHHHHH\":\n res = res[:,:-8]\n if initial_sequence[-9:-3] == \"HHHHHH\":\n res = res[:,:-9]\n if initial_sequence[-10:-4] == \"HHHHHH\":\n res = res[:,:-10]\n if initial_sequence[1:7] == \"HHHHHH\":\n res = res[:,7:]\n if initial_sequence[2:8] == \"HHHHHH\":\n res = res[:,8:]\n if initial_sequence[3:9] == \"HHHHHH\":\n res = res[:,9:]\n if initial_sequence[4:10] == \"HHHHHH\":\n res = res[:,10:]\n if res.shape[1] < 4:\n pass\n else:\n my_dict['seq_chain_'+letter]= \"\".join(list(np.array(list(t['seq']))[res][0,]))\n concat_seq += my_dict['seq_chain_'+letter]\n if idx in t['masked']:\n mask_list.append(letter)\n else:\n visible_list.append(letter)\n coords_dict_chain = {}\n all_atoms = np.array(t['xyz'][res,])[0,] #[L, 14, 3]\n coords_dict_chain['N_chain_'+letter]=all_atoms[:,0,:].tolist()\n coords_dict_chain['CA_chain_'+letter]=all_atoms[:,1,:].tolist()\n coords_dict_chain['C_chain_'+letter]=all_atoms[:,2,:].tolist()\n coords_dict_chain['O_chain_'+letter]=all_atoms[:,3,:].tolist()\n my_dict['coords_chain_'+letter]=coords_dict_chain\n my_dict['name']= t['label']\n my_dict['masked_list']= mask_list\n my_dict['visible_list']= visible_list\n my_dict['num_of_chains'] = len(mask_list) + len(visible_list)\n my_dict['seq'] = concat_seq\n if len(concat_seq) <= max_length:\n pdb_dict_list.append(my_dict)\n if len(pdb_dict_list) >= num_units:\n break\n return pdb_dict_list"
}
] | import torch
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from data.utils import StructureDatasetJSONL, StructureLoader
from data.utils import StructureDatasetPDB, PDB_dataset, loader_pdb, build_training_clusters, worker_init_fn, get_pdbs | 3,832 |
class Generic_PMPNN_DM(pl.LightningDataModule):
def __init__(self, cfg, debug, truncate=None):
super().__init__()
self.cfg = cfg
self.debug = debug
self.truncate = 100 if debug and truncate is None else truncate
self.memorize = True if self.truncate == 1 else False
self.batch_size = cfg.batch_size
print(f'Debug: {self.debug}')
print(f"Batch size: {self.batch_size}")
|
class Generic_PMPNN_DM(pl.LightningDataModule):
def __init__(self, cfg, debug, truncate=None):
super().__init__()
self.cfg = cfg
self.debug = debug
self.truncate = 100 if debug and truncate is None else truncate
self.memorize = True if self.truncate == 1 else False
self.batch_size = cfg.batch_size
print(f'Debug: {self.debug}')
print(f"Batch size: {self.batch_size}")
| def train_dataloader(self) -> StructureLoader: | 1 | 2023-10-16 08:47:43+00:00 | 8k |
generative-skill-chaining/gsc-code | generative_skill_chaining/envs/pybullet/real/object_tracker.py | [
{
"identifier": "redisgl",
"path": "generative_skill_chaining/envs/pybullet/real/redisgl.py",
"snippet": "KEY_ARGS = \"webapp::simulator::args\"\nKEY_RESOURCES = \"webapp::resources::simulator\"\nclass Pose:\nclass Geometry(abc.ABC):\nclass Box(Geometry):\nclass Capsule(Geometry):\nclass Cylinder(Geometry):\nclass Sphere(Geometry):\nclass Mesh(Geometry):\nclass Material:\nclass Graphics:\nclass ModelKeys:\nclass ObjectModel:\nclass RobotModel:\n def from_dict(self, pose: Dict[str, Any]) -> \"Pose\":\n def to_dict(self) -> Dict[str, Any]:\n def to_dict(self) -> Dict[str, Any]:\n def __init__(self, scale: Union[Sequence[float], np.ndarray]):\n def to_dict(self) -> Dict[str, Any]:\n def __init__(self, radius: float, length: float):\n def to_dict(self) -> Dict[str, Any]:\n def __init__(self, radius: float, length: float):\n def to_dict(self) -> Dict[str, Any]:\n def __init__(self, radius: float):\n def to_dict(self) -> Dict[str, Any]:\n def __init__(self, path: str, scale: Union[Sequence[float], np.ndarray]):\n def to_dict(self) -> Dict[str, Any]:\n def to_dict(self) -> Dict[str, Any]:\n def to_dict(self) -> Dict[str, Any]:\ndef register_resource_path(redis: ctrlutils.RedisClient, path: str) -> None:\ndef unregister_resource_path(redis: ctrlutils.RedisClient, path: str) -> None:\n def __init__(self, key_namespace: str):\n def to_dict(self) -> Dict[str, Any]:\ndef register_model_keys(redis: ctrlutils.RedisClient, model_keys: ModelKeys) -> None:\ndef unregister_model_keys(redis: ctrlutils.RedisClient, model_keys: ModelKeys) -> None:\n def to_dict(self) -> Dict[str, Any]:\ndef register_object(\n redis: ctrlutils.RedisClient, model_keys: ModelKeys, object: ObjectModel\n) -> None:\ndef unregister_object(\n redis: ctrlutils.RedisClient, model_keys: ModelKeys, name: str\n) -> None:\n def to_dict(self) -> Dict[str, Any]:\ndef register_robot(\n redis: ctrlutils.RedisClient, model_keys: ModelKeys, robot: RobotModel\n) -> None:"
},
{
"identifier": "math",
"path": "generative_skill_chaining/envs/pybullet/sim/math.py",
"snippet": "PYBULLET_STEPS_PER_SEC = 240\nPYBULLET_TIMESTEP = 1 / PYBULLET_STEPS_PER_SEC\nclass Pose:\n def from_eigen(pose: eigen.Isometry3d) -> \"Pose\":\n def to_eigen(self) -> eigen.Isometry3d:\ndef comb(n: int, r: int) -> int:"
},
{
"identifier": "shapes",
"path": "generative_skill_chaining/envs/pybullet/sim/shapes.py",
"snippet": "def create_body(\n shapes: Union[\"Shape\", Sequence[\"Shape\"]],\n link_parents: Optional[Sequence[int]] = None,\n physics_id: int = 0,\n) -> int:\n def visual_kwargs(\n self, is_base: bool = False\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n def create_visual(self, physics_id: int, is_base: bool = False) -> Tuple[int, int]:\n def visual_kwargs(\n self, is_base: bool = False\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n def visual_kwargs(\n self, is_base: bool = False\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n def visual_kwargs(\n self, is_base: bool = False\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\nclass JointType(enum.IntEnum):\nclass Joint:\nclass Shape:\nclass Box(Shape):\nclass Cylinder(Shape):\nclass Sphere(Shape):\n REVOLUTE = p.JOINT_REVOLUTE\n PRISMATIC = p.JOINT_PRISMATIC\n SPHERICAL = p.JOINT_SPHERICAL\n FIXED = p.JOINT_FIXED"
},
{
"identifier": "Object",
"path": "generative_skill_chaining/envs/pybullet/table/objects.py",
"snippet": "class Object(body.Body):\n name: str\n is_static: bool = False\n\n def __init__(\n self, physics_id: int, body_id: int, name: str, is_static: bool = False\n ):\n super().__init__(physics_id, body_id)\n\n self.name = name\n self.is_static = is_static\n\n T_pybullet_to_obj = super().pose().to_eigen()\n self._modified_axes = not T_pybullet_to_obj.is_approx(\n eigen.Isometry3d.identity()\n )\n if self._modified_axes:\n self._T_pybullet_to_obj = T_pybullet_to_obj\n self._T_obj_to_pybullet = T_pybullet_to_obj.inverse()\n\n self._state = object_state.ObjectState()\n\n def pose(self) -> math.Pose:\n if not self._modified_axes:\n return super().pose()\n\n return math.Pose.from_eigen(super().pose().to_eigen() * self._T_obj_to_pybullet)\n\n def set_pose(self, pose: math.Pose) -> None:\n if not self._modified_axes:\n return super().set_pose(pose)\n\n return super().set_pose(\n math.Pose.from_eigen(pose.to_eigen() * self._T_pybullet_to_obj)\n )\n\n def disable_collisions(self) -> None:\n for link_id in range(self.dof):\n p.setCollisionFilterGroupMask(\n self.body_id, link_id, 0, 0, physicsClientId=self.physics_id\n )\n\n def enable_collisions(self) -> None:\n for link_id in range(self.dof):\n p.setCollisionFilterGroupMask(\n self.body_id, link_id, 1, 0xFF, physicsClientId=self.physics_id\n )\n\n @property\n def inertia(self) -> dyn.SpatialInertiad:\n try:\n return self._obj_inertia # type: ignore\n except AttributeError:\n pass\n\n self._obj_inertia = super().inertia\n if self._modified_axes:\n self._obj_inertia = self._obj_inertia * self._T_pybullet_to_obj\n\n T_world_to_obj = self.pose().to_eigen().inverse()\n for link_id in range(self.dof):\n link = body.Link(self.physics_id, self.body_id, link_id)\n T_link_to_obj = T_world_to_obj * link.pose().to_eigen()\n self._obj_inertia += link.inertia * T_link_to_obj\n\n return self._obj_inertia\n\n def state(self) -> object_state.ObjectState:\n pose = self.pose()\n aa = eigen.AngleAxisd(eigen.Quaterniond(pose.quat))\n self._state.pos = pose.pos\n self._state.aa = aa.angle * aa.axis\n\n return self._state\n\n def set_state(self, state: object_state.ObjectState) -> None:\n self.set_pose(state.pose())\n\n def reset(self, action_skeleton: List) -> None:\n pass\n\n @classmethod\n def create(\n cls,\n physics_id: int,\n object_type: Optional[str],\n object_kwargs: Dict[str, Any] = {},\n object_groups: Dict[str, \"ObjectGroup\"] = {},\n **kwargs,\n ) -> \"Object\":\n object_class = Null if object_type is None else globals()[object_type]\n if issubclass(object_class, Variant):\n kwargs[\"object_groups\"] = object_groups\n object_kwargs = object_kwargs.copy()\n object_kwargs.update(kwargs)\n return object_class(physics_id=physics_id, **object_kwargs)\n\n def isinstance(self, class_or_tuple: Union[type, Tuple[type, ...]]) -> bool:\n return isinstance(self, class_or_tuple)\n\n def type(self) -> Type[\"Object\"]:\n return type(self)\n\n @property\n def size(self) -> np.ndarray:\n raise NotImplementedError\n\n @property\n def bbox(self) -> np.ndarray:\n \"\"\"Returns the bounding box in the object frame.\n\n If the origin of the object is at its geometric center, this will be\n equivalent to `(-0.5 * self.size, 0.5 * self.size)`.\n\n Returns:\n An array of shape [2, 3] (min/max, x/y/z).\n \"\"\"\n raise NotImplementedError\n\n def convex_hulls(\n self, world_frame: bool = True, project_2d: bool = False\n ) -> List[np.ndarray]:\n \"\"\"Computes the object's convex hull.\n\n These hulls will be used for rough collision checking. By default,\n the vertices will be the 6 corners of the object's bounding box\n (`Object.bbox`).\n\n Args:\n world_frame: Whether to transform the vertices in world frame or\n leave them in object frame.\n project_2d: Whether to return the 2d convex hull.\n\n Returns:\n List of arrays of shape [_, 3] or [_, 2], where each array is a\n convex hull.\n \"\"\"\n pose = self.pose() if world_frame else None\n vertices = compute_bbox_vertices(self.bbox, pose, project_2d)\n\n return [vertices]\n\n def aabb(self) -> np.ndarray:\n \"\"\"Computes the axis-aligned bounding box from the object pose and size.\n\n This should be more accurate than `super().aabb()`, which gets the aabb\n from Pybullet. Pybullet returns an *enlarged* aabb for the object *base*\n link, while this returns the exact aabb for the entire object.\n\n Returns:\n An array of shape [2, 3] (min/max, x/y/z).\n \"\"\"\n vertices = np.concatenate(self.convex_hulls(world_frame=True), axis=0)\n xyz_min = vertices.min(axis=0)\n xyz_max = vertices.max(axis=0)\n\n return np.array([xyz_min, xyz_max])\n\n @property\n def shapes(self) -> Sequence[shapes.Shape]:\n return []\n\n def __str__(self) -> str:\n return self.name\n\n def __hash__(self) -> int:\n return hash(str(self))\n\n def __eq__(self, other) -> bool:\n return str(self) == str(other)"
},
{
"identifier": "Variant",
"path": "generative_skill_chaining/envs/pybullet/table/objects.py",
"snippet": "class Variant(WrapperObject):\n def __init__(\n self,\n physics_id: int,\n name: str,\n variants: Optional[List[Dict[str, Any]]] = None,\n group: Optional[str] = None,\n object_groups: Dict[str, ObjectGroup] = {},\n ):\n from generative_skill_chaining.envs.pybullet.table.utils import load_config\n\n self.physics_id = physics_id\n self.name = name\n\n if variants is None and group is None:\n raise ValueError(\"One of variants or group must be specified\")\n elif variants is not None and group is not None:\n raise ValueError(\"Only one of variants or group can be specified\")\n\n if variants is not None:\n self._variants: Union[List[Object], ObjectGroup] = [\n Object.create(\n physics_id=self.physics_id,\n name=self.name,\n **load_config(obj_config),\n )\n for obj_config in variants\n ]\n self._real_indices = [\n i\n for i, variant in enumerate(self.variants)\n if not variant.isinstance(Null)\n ]\n else:\n assert group is not None\n self._variants = object_groups[group]\n\n self._body: Optional[Object] = None\n self._idx_variant: Optional[int] = None\n\n @property\n def body(self) -> Object: # type: ignore\n if self._body is None:\n raise RuntimeError(\"Variant.reset() must be called first\")\n return self._body\n\n @property\n def variants(self) -> Union[List[Object], ObjectGroup]:\n return self._variants\n\n def set_variant(\n self, idx_variant: Optional[int], action_skeleton: List, lock: bool = False\n ) -> None:\n \"\"\"Sets the variant for debugging purposes.\n\n Args:\n idx_variant: Index of the variant to set.\n action_skeleton: List of primitives used to identify required objects.\n lock: Whether to lock the variant so it remains the same upon resetting.\n \"\"\"\n if isinstance(self.variants, ObjectGroup):\n idx_variant = self.variants.pop_index(self, idx_variant)\n else:\n if idx_variant is None:\n if any(self in primitive.arg_objects for primitive in action_skeleton):\n idx_variant = random.choice(self._real_indices)\n else:\n idx_variant = random.randrange(len(self.variants))\n\n # Hide unused variants below table.\n for i, obj in enumerate(self.variants):\n if i == idx_variant:\n continue\n obj.disable_collisions()\n obj.set_pose(math.Pose(pos=np.array([0.0, 0.0, -0.5])))\n obj.freeze()\n\n self._body = self.variants[idx_variant]\n if lock:\n self._idx_variant = idx_variant\n\n def reset(self, action_skeleton: List) -> None:\n self.set_variant(self._idx_variant, action_skeleton)\n self.enable_collisions()\n self.unfreeze()\n self.body.reset(action_skeleton)"
}
] | import pathlib
import ctrlutils
import numpy as np
from typing import Dict, Iterable, List, Optional, Sequence, Union
from ctrlutils import eigen
from generative_skill_chaining.envs.pybullet.real import redisgl
from generative_skill_chaining.envs.pybullet.sim import math, shapes
from generative_skill_chaining.envs.pybullet.table.objects import Object, Variant | 4,353 | )
quat = eigen.Quaterniond(shape.pose.quat) * quat_pybullet_to_redisgl
return redisgl.Pose(shape.pose.pos, quat.coeffs)
else:
return redisgl.Pose(shape.pose.pos, shape.pose.quat)
def create_geometry(shape: shapes.Shape) -> redisgl.Geometry:
if isinstance(shape, shapes.Box):
return redisgl.Box(scale=shape.size)
elif isinstance(shape, shapes.Cylinder):
return redisgl.Cylinder(radius=shape.radius, length=shape.length)
elif isinstance(shape, shapes.Sphere):
return redisgl.Sphere(radius=shape.radius)
raise NotImplementedError(f"Shape type {shape} is not supported.")
def create_graphics(object: Object) -> Sequence[redisgl.Graphics]:
if isinstance(object, Variant):
return []
return [
redisgl.Graphics(
name=object.name,
geometry=create_geometry(shape),
T_to_parent=create_pose(shape),
)
for shape in object.shapes
]
def create_object_model(object: Object, key_namespace: str) -> redisgl.ObjectModel:
return redisgl.ObjectModel(
name=object.name,
graphics=create_graphics(object),
key_pos=f"{key_namespace}::objects::{object.name}::pos",
key_ori=f"{key_namespace}::objects::{object.name}::ori",
)
class ObjectTracker:
def __init__(
self,
objects: Dict[str, Object],
redis_host: str,
redis_port: int,
redis_password: str,
key_namespace: str,
object_key_prefix: str,
assets_path: Union[str, pathlib.Path],
):
self._redis = ctrlutils.RedisClient(redis_host, redis_port, redis_password)
self._redis_pipe = self._redis.pipeline()
self._object_key_prefix = object_key_prefix
self._assets_path = str(pathlib.Path(assets_path).absolute())
redisgl.register_resource_path(self._redis_pipe, self._assets_path)
self._model_keys = redisgl.ModelKeys(key_namespace)
redisgl.register_model_keys(self._redis_pipe, self._model_keys)
self._redis_pipe.execute()
self._tracked_objects = [] # self.get_tracked_objects(objects.values())
for object in objects.values():
try:
redisgl.register_object(
self._redis_pipe,
self._model_keys,
object=create_object_model(object, key_namespace),
)
except NotImplementedError:
continue
self._tracked_objects.append(object)
self._redis_pipe.execute()
def __del__(self) -> None:
redisgl.unregister_resource_path(self._redis_pipe, self._assets_path)
redisgl.unregister_model_keys(self._redis_pipe, self._model_keys)
for object in self._tracked_objects:
redisgl.unregister_object(self._redis_pipe, self._model_keys, object.name)
self._redis_pipe.execute()
def get_tracked_objects(self, objects: Iterable[Object]) -> List[Object]:
for object in objects:
self._redis_pipe.get(self._object_key_prefix + object.name + "::pos")
object_models = self._redis_pipe.execute()
return [
object
for object, object_model in zip(objects, object_models)
if object_model is not None
]
def update_poses(
self,
objects: Optional[Iterable[Object]] = None,
exclude: Optional[Sequence[Object]] = None,
) -> List[Object]:
if objects is None:
objects = self._tracked_objects
# Query all object poses.
for object in objects:
self._redis_pipe.get(self._object_key_prefix + object.name + "::pos")
self._redis_pipe.get(self._object_key_prefix + object.name + "::ori")
b_object_poses = self._redis_pipe.execute()
# Set returned poses.
updated_objects = []
for i, object in enumerate(objects):
if exclude is not None and object in exclude:
continue
b_object_pos = b_object_poses[2 * i]
b_object_quat = b_object_poses[2 * i + 1]
if b_object_pos is None or b_object_quat is None:
continue
object_pos = ctrlutils.redis.decode_matlab(b_object_pos)
object_quat = ctrlutils.redis.decode_matlab(b_object_quat)
|
def create_pose(shape: shapes.Shape) -> redisgl.Pose:
if shape.pose is None:
return redisgl.Pose()
elif isinstance(shape, shapes.Cylinder):
quat_pybullet_to_redisgl = eigen.Quaterniond(
eigen.AngleAxisd(np.pi / 2, np.array([1.0, 0.0, 0.0]))
)
quat = eigen.Quaterniond(shape.pose.quat) * quat_pybullet_to_redisgl
return redisgl.Pose(shape.pose.pos, quat.coeffs)
else:
return redisgl.Pose(shape.pose.pos, shape.pose.quat)
def create_geometry(shape: shapes.Shape) -> redisgl.Geometry:
if isinstance(shape, shapes.Box):
return redisgl.Box(scale=shape.size)
elif isinstance(shape, shapes.Cylinder):
return redisgl.Cylinder(radius=shape.radius, length=shape.length)
elif isinstance(shape, shapes.Sphere):
return redisgl.Sphere(radius=shape.radius)
raise NotImplementedError(f"Shape type {shape} is not supported.")
def create_graphics(object: Object) -> Sequence[redisgl.Graphics]:
if isinstance(object, Variant):
return []
return [
redisgl.Graphics(
name=object.name,
geometry=create_geometry(shape),
T_to_parent=create_pose(shape),
)
for shape in object.shapes
]
def create_object_model(object: Object, key_namespace: str) -> redisgl.ObjectModel:
return redisgl.ObjectModel(
name=object.name,
graphics=create_graphics(object),
key_pos=f"{key_namespace}::objects::{object.name}::pos",
key_ori=f"{key_namespace}::objects::{object.name}::ori",
)
class ObjectTracker:
def __init__(
self,
objects: Dict[str, Object],
redis_host: str,
redis_port: int,
redis_password: str,
key_namespace: str,
object_key_prefix: str,
assets_path: Union[str, pathlib.Path],
):
self._redis = ctrlutils.RedisClient(redis_host, redis_port, redis_password)
self._redis_pipe = self._redis.pipeline()
self._object_key_prefix = object_key_prefix
self._assets_path = str(pathlib.Path(assets_path).absolute())
redisgl.register_resource_path(self._redis_pipe, self._assets_path)
self._model_keys = redisgl.ModelKeys(key_namespace)
redisgl.register_model_keys(self._redis_pipe, self._model_keys)
self._redis_pipe.execute()
self._tracked_objects = [] # self.get_tracked_objects(objects.values())
for object in objects.values():
try:
redisgl.register_object(
self._redis_pipe,
self._model_keys,
object=create_object_model(object, key_namespace),
)
except NotImplementedError:
continue
self._tracked_objects.append(object)
self._redis_pipe.execute()
def __del__(self) -> None:
redisgl.unregister_resource_path(self._redis_pipe, self._assets_path)
redisgl.unregister_model_keys(self._redis_pipe, self._model_keys)
for object in self._tracked_objects:
redisgl.unregister_object(self._redis_pipe, self._model_keys, object.name)
self._redis_pipe.execute()
def get_tracked_objects(self, objects: Iterable[Object]) -> List[Object]:
for object in objects:
self._redis_pipe.get(self._object_key_prefix + object.name + "::pos")
object_models = self._redis_pipe.execute()
return [
object
for object, object_model in zip(objects, object_models)
if object_model is not None
]
def update_poses(
self,
objects: Optional[Iterable[Object]] = None,
exclude: Optional[Sequence[Object]] = None,
) -> List[Object]:
if objects is None:
objects = self._tracked_objects
# Query all object poses.
for object in objects:
self._redis_pipe.get(self._object_key_prefix + object.name + "::pos")
self._redis_pipe.get(self._object_key_prefix + object.name + "::ori")
b_object_poses = self._redis_pipe.execute()
# Set returned poses.
updated_objects = []
for i, object in enumerate(objects):
if exclude is not None and object in exclude:
continue
b_object_pos = b_object_poses[2 * i]
b_object_quat = b_object_poses[2 * i + 1]
if b_object_pos is None or b_object_quat is None:
continue
object_pos = ctrlutils.redis.decode_matlab(b_object_pos)
object_quat = ctrlutils.redis.decode_matlab(b_object_quat)
| object.set_pose(math.Pose(object_pos, object_quat)) | 1 | 2023-10-16 00:22:40+00:00 | 8k |
akashgreninja/GreSec | backend/venv/lib/python3.10/site-packages/charset_normalizer/md.py | [
{
"identifier": "COMMON_SAFE_ASCII_CHARACTERS",
"path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/constant.py",
"snippet": "COMMON_SAFE_ASCII_CHARACTERS: Set[str] = {\n \"<\",\n \">\",\n \"=\",\n \":\",\n \"/\",\n \"&\",\n \";\",\n \"{\",\n \"}\",\n \"[\",\n \"]\",\n \",\",\n \"|\",\n '\"',\n \"-\",\n}"
},
{
"identifier": "TRACE",
"path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/constant.py",
"snippet": "TRACE: int = 5"
},
{
"identifier": "UNICODE_SECONDARY_RANGE_KEYWORD",
"path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/constant.py",
"snippet": "UNICODE_SECONDARY_RANGE_KEYWORD: List[str] = [\n \"Supplement\",\n \"Extended\",\n \"Extensions\",\n \"Modifier\",\n \"Marks\",\n \"Punctuation\",\n \"Symbols\",\n \"Forms\",\n \"Operators\",\n \"Miscellaneous\",\n \"Drawing\",\n \"Block\",\n \"Shapes\",\n \"Supplemental\",\n \"Tags\",\n]"
},
{
"identifier": "is_accentuated",
"path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_accentuated(character: str) -> bool:\n try:\n description: str = unicodedata.name(character)\n except ValueError:\n return False\n return (\n \"WITH GRAVE\" in description\n or \"WITH ACUTE\" in description\n or \"WITH CEDILLA\" in description\n or \"WITH DIAERESIS\" in description\n or \"WITH CIRCUMFLEX\" in description\n or \"WITH TILDE\" in description\n )"
},
{
"identifier": "is_case_variable",
"path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_case_variable(character: str) -> bool:\n return character.islower() != character.isupper()"
},
{
"identifier": "is_cjk",
"path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_cjk(character: str) -> bool:\n try:\n character_name = unicodedata.name(character)\n except ValueError:\n return False\n\n return \"CJK\" in character_name"
},
{
"identifier": "is_emoticon",
"path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_emoticon(character: str) -> bool:\n character_range: Optional[str] = unicode_range(character)\n\n if character_range is None:\n return False\n\n return \"Emoticons\" in character_range or \"Pictographs\" in character_range"
},
{
"identifier": "is_hangul",
"path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_hangul(character: str) -> bool:\n try:\n character_name = unicodedata.name(character)\n except ValueError:\n return False\n\n return \"HANGUL\" in character_name"
},
{
"identifier": "is_hiragana",
"path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_hiragana(character: str) -> bool:\n try:\n character_name = unicodedata.name(character)\n except ValueError:\n return False\n\n return \"HIRAGANA\" in character_name"
},
{
"identifier": "is_katakana",
"path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_katakana(character: str) -> bool:\n try:\n character_name = unicodedata.name(character)\n except ValueError:\n return False\n\n return \"KATAKANA\" in character_name"
},
{
"identifier": "is_latin",
"path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_latin(character: str) -> bool:\n try:\n description: str = unicodedata.name(character)\n except ValueError:\n return False\n return \"LATIN\" in description"
},
{
"identifier": "is_punctuation",
"path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_punctuation(character: str) -> bool:\n character_category: str = unicodedata.category(character)\n\n if \"P\" in character_category:\n return True\n\n character_range: Optional[str] = unicode_range(character)\n\n if character_range is None:\n return False\n\n return \"Punctuation\" in character_range"
},
{
"identifier": "is_separator",
"path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_separator(character: str) -> bool:\n if character.isspace() or character in {\"|\", \"+\", \"<\", \">\"}:\n return True\n\n character_category: str = unicodedata.category(character)\n\n return \"Z\" in character_category or character_category in {\"Po\", \"Pd\", \"Pc\"}"
},
{
"identifier": "is_symbol",
"path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_symbol(character: str) -> bool:\n character_category: str = unicodedata.category(character)\n\n if \"S\" in character_category or \"N\" in character_category:\n return True\n\n character_range: Optional[str] = unicode_range(character)\n\n if character_range is None:\n return False\n\n return \"Forms\" in character_range and character_category != \"Lo\""
},
{
"identifier": "is_thai",
"path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_thai(character: str) -> bool:\n try:\n character_name = unicodedata.name(character)\n except ValueError:\n return False\n\n return \"THAI\" in character_name"
},
{
"identifier": "is_unprintable",
"path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_unprintable(character: str) -> bool:\n return (\n character.isspace() is False # includes \\n \\t \\r \\v\n and character.isprintable() is False\n and character != \"\\x1A\" # Why? Its the ASCII substitute character.\n and character != \"\\ufeff\" # bug discovered in Python,\n # Zero Width No-Break Space located in \tArabic Presentation Forms-B, Unicode 1.1 not acknowledged as space.\n )"
},
{
"identifier": "remove_accent",
"path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef remove_accent(character: str) -> str:\n decomposed: str = unicodedata.decomposition(character)\n if not decomposed:\n return character\n\n codes: List[str] = decomposed.split(\" \")\n\n return chr(int(codes[0], 16))"
},
{
"identifier": "unicode_range",
"path": "backend/venv/lib/python3.10/site-packages/charset_normalizer/utils.py",
"snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef unicode_range(character: str) -> Optional[str]:\n \"\"\"\n Retrieve the Unicode range official name from a single character.\n \"\"\"\n character_ord: int = ord(character)\n\n for range_name, ord_range in UNICODE_RANGES_COMBINED.items():\n if character_ord in ord_range:\n return range_name\n\n return None"
}
] | from functools import lru_cache
from logging import getLogger
from typing import List, Optional
from .constant import (
COMMON_SAFE_ASCII_CHARACTERS,
TRACE,
UNICODE_SECONDARY_RANGE_KEYWORD,
)
from .utils import (
is_accentuated,
is_case_variable,
is_cjk,
is_emoticon,
is_hangul,
is_hiragana,
is_katakana,
is_latin,
is_punctuation,
is_separator,
is_symbol,
is_thai,
is_unprintable,
remove_accent,
unicode_range,
) | 3,775 | def ratio(self) -> float:
if self._character_count == 0:
return 0.0
return (self._unprintable_count * 8) / self._character_count
class SuspiciousDuplicateAccentPlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._successive_count: int = 0
self._character_count: int = 0
self._last_latin_character: Optional[str] = None
def eligible(self, character: str) -> bool:
return character.isalpha() and is_latin(character)
def feed(self, character: str) -> None:
self._character_count += 1
if (
self._last_latin_character is not None
and is_accentuated(character)
and is_accentuated(self._last_latin_character)
):
if character.isupper() and self._last_latin_character.isupper():
self._successive_count += 1
# Worse if its the same char duplicated with different accent.
if remove_accent(character) == remove_accent(self._last_latin_character):
self._successive_count += 1
self._last_latin_character = character
def reset(self) -> None: # pragma: no cover
self._successive_count = 0
self._character_count = 0
self._last_latin_character = None
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.0
return (self._successive_count * 2) / self._character_count
class SuspiciousRange(MessDetectorPlugin):
def __init__(self) -> None:
self._suspicious_successive_range_count: int = 0
self._character_count: int = 0
self._last_printable_seen: Optional[str] = None
def eligible(self, character: str) -> bool:
return character.isprintable()
def feed(self, character: str) -> None:
self._character_count += 1
if (
character.isspace()
or is_punctuation(character)
or character in COMMON_SAFE_ASCII_CHARACTERS
):
self._last_printable_seen = None
return
if self._last_printable_seen is None:
self._last_printable_seen = character
return
unicode_range_a: Optional[str] = unicode_range(self._last_printable_seen)
unicode_range_b: Optional[str] = unicode_range(character)
if is_suspiciously_successive_range(unicode_range_a, unicode_range_b):
self._suspicious_successive_range_count += 1
self._last_printable_seen = character
def reset(self) -> None: # pragma: no cover
self._character_count = 0
self._suspicious_successive_range_count = 0
self._last_printable_seen = None
@property
def ratio(self) -> float:
if self._character_count <= 24:
return 0.0
ratio_of_suspicious_range_usage: float = (
self._suspicious_successive_range_count * 2
) / self._character_count
return ratio_of_suspicious_range_usage
class SuperWeirdWordPlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._word_count: int = 0
self._bad_word_count: int = 0
self._foreign_long_count: int = 0
self._is_current_word_bad: bool = False
self._foreign_long_watch: bool = False
self._character_count: int = 0
self._bad_character_count: int = 0
self._buffer: str = ""
self._buffer_accent_count: int = 0
def eligible(self, character: str) -> bool:
return True
def feed(self, character: str) -> None:
if character.isalpha():
self._buffer += character
if is_accentuated(character):
self._buffer_accent_count += 1
if (
self._foreign_long_watch is False
and (is_latin(character) is False or is_accentuated(character))
and is_cjk(character) is False
|
class MessDetectorPlugin:
"""
Base abstract class used for mess detection plugins.
All detectors MUST extend and implement given methods.
"""
def eligible(self, character: str) -> bool:
"""
Determine if given character should be fed in.
"""
raise NotImplementedError # pragma: nocover
def feed(self, character: str) -> None:
"""
The main routine to be executed upon character.
Insert the logic in witch the text would be considered chaotic.
"""
raise NotImplementedError # pragma: nocover
def reset(self) -> None: # pragma: no cover
"""
Permit to reset the plugin to the initial state.
"""
raise NotImplementedError
@property
def ratio(self) -> float:
"""
Compute the chaos ratio based on what your feed() has seen.
Must NOT be lower than 0.; No restriction gt 0.
"""
raise NotImplementedError # pragma: nocover
class TooManySymbolOrPunctuationPlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._punctuation_count: int = 0
self._symbol_count: int = 0
self._character_count: int = 0
self._last_printable_char: Optional[str] = None
self._frenzy_symbol_in_word: bool = False
def eligible(self, character: str) -> bool:
return character.isprintable()
def feed(self, character: str) -> None:
self._character_count += 1
if (
character != self._last_printable_char
and character not in COMMON_SAFE_ASCII_CHARACTERS
):
if is_punctuation(character):
self._punctuation_count += 1
elif (
character.isdigit() is False
and is_symbol(character)
and is_emoticon(character) is False
):
self._symbol_count += 2
self._last_printable_char = character
def reset(self) -> None: # pragma: no cover
self._punctuation_count = 0
self._character_count = 0
self._symbol_count = 0
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.0
ratio_of_punctuation: float = (
self._punctuation_count + self._symbol_count
) / self._character_count
return ratio_of_punctuation if ratio_of_punctuation >= 0.3 else 0.0
class TooManyAccentuatedPlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._character_count: int = 0
self._accentuated_count: int = 0
def eligible(self, character: str) -> bool:
return character.isalpha()
def feed(self, character: str) -> None:
self._character_count += 1
if is_accentuated(character):
self._accentuated_count += 1
def reset(self) -> None: # pragma: no cover
self._character_count = 0
self._accentuated_count = 0
@property
def ratio(self) -> float:
if self._character_count == 0 or self._character_count < 8:
return 0.0
ratio_of_accentuation: float = self._accentuated_count / self._character_count
return ratio_of_accentuation if ratio_of_accentuation >= 0.35 else 0.0
class UnprintablePlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._unprintable_count: int = 0
self._character_count: int = 0
def eligible(self, character: str) -> bool:
return True
def feed(self, character: str) -> None:
if is_unprintable(character):
self._unprintable_count += 1
self._character_count += 1
def reset(self) -> None: # pragma: no cover
self._unprintable_count = 0
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.0
return (self._unprintable_count * 8) / self._character_count
class SuspiciousDuplicateAccentPlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._successive_count: int = 0
self._character_count: int = 0
self._last_latin_character: Optional[str] = None
def eligible(self, character: str) -> bool:
return character.isalpha() and is_latin(character)
def feed(self, character: str) -> None:
self._character_count += 1
if (
self._last_latin_character is not None
and is_accentuated(character)
and is_accentuated(self._last_latin_character)
):
if character.isupper() and self._last_latin_character.isupper():
self._successive_count += 1
# Worse if its the same char duplicated with different accent.
if remove_accent(character) == remove_accent(self._last_latin_character):
self._successive_count += 1
self._last_latin_character = character
def reset(self) -> None: # pragma: no cover
self._successive_count = 0
self._character_count = 0
self._last_latin_character = None
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.0
return (self._successive_count * 2) / self._character_count
class SuspiciousRange(MessDetectorPlugin):
def __init__(self) -> None:
self._suspicious_successive_range_count: int = 0
self._character_count: int = 0
self._last_printable_seen: Optional[str] = None
def eligible(self, character: str) -> bool:
return character.isprintable()
def feed(self, character: str) -> None:
self._character_count += 1
if (
character.isspace()
or is_punctuation(character)
or character in COMMON_SAFE_ASCII_CHARACTERS
):
self._last_printable_seen = None
return
if self._last_printable_seen is None:
self._last_printable_seen = character
return
unicode_range_a: Optional[str] = unicode_range(self._last_printable_seen)
unicode_range_b: Optional[str] = unicode_range(character)
if is_suspiciously_successive_range(unicode_range_a, unicode_range_b):
self._suspicious_successive_range_count += 1
self._last_printable_seen = character
def reset(self) -> None: # pragma: no cover
self._character_count = 0
self._suspicious_successive_range_count = 0
self._last_printable_seen = None
@property
def ratio(self) -> float:
if self._character_count <= 24:
return 0.0
ratio_of_suspicious_range_usage: float = (
self._suspicious_successive_range_count * 2
) / self._character_count
return ratio_of_suspicious_range_usage
class SuperWeirdWordPlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._word_count: int = 0
self._bad_word_count: int = 0
self._foreign_long_count: int = 0
self._is_current_word_bad: bool = False
self._foreign_long_watch: bool = False
self._character_count: int = 0
self._bad_character_count: int = 0
self._buffer: str = ""
self._buffer_accent_count: int = 0
def eligible(self, character: str) -> bool:
return True
def feed(self, character: str) -> None:
if character.isalpha():
self._buffer += character
if is_accentuated(character):
self._buffer_accent_count += 1
if (
self._foreign_long_watch is False
and (is_latin(character) is False or is_accentuated(character))
and is_cjk(character) is False | and is_hangul(character) is False | 7 | 2023-10-23 18:09:28+00:00 | 8k |
marmotlab/Context_Aware_Navigation | graph_generator.py | [
{
"identifier": "Node",
"path": "node.py",
"snippet": "class Node():\r\n def __init__(self, coords, frontiers, robot_belief, target_position):\r\n self.coords = coords\r\n self.observable_frontiers = []\r\n self.sensor_range = 80\r\n self.target_position = target_position\r\n self.direction_vector = self.get_direction_vector()\r\n self.initialize_observable_frontiers(frontiers, robot_belief)\r\n self.utility = self.get_node_utility()\r\n if self.utility == 0:\r\n self.zero_utility_node = True\r\n else:\r\n self.utiliy = 1\r\n self.zero_utility_node = False\r\n\r\n def initialize_observable_frontiers(self, frontiers, robot_belief):\r\n dist_list = np.linalg.norm(frontiers - self.coords, axis=-1)\r\n frontiers_in_range = frontiers[dist_list < self.sensor_range - 10]\r\n for point in frontiers_in_range:\r\n collision = self.check_collision(self.coords, point, robot_belief)\r\n if not collision:\r\n self.observable_frontiers.append(point)\r\n\r\n def get_direction_vector(self):\r\n dx = self.target_position[0] - self.coords[0]\r\n dy = self.target_position[1] - self.coords[1]\r\n mag = (dx**2 + dy**2)** .5\r\n if mag != 0:\r\n dx = dx / mag\r\n dy = dy / mag\r\n if mag > 80:\r\n mag = 80\r\n return [dx, dy, mag]\r\n\r\n def get_node_utility(self):\r\n return len(self.observable_frontiers)\r\n\r\n def update_observable_frontiers(self, observed_frontiers, new_frontiers, robot_belief):\r\n if observed_frontiers != []:\r\n observed_index = []\r\n for i, point in enumerate(self.observable_frontiers):\r\n if point[0] + point[1] * 1j in observed_frontiers[:, 0] + observed_frontiers[:, 1] * 1j:\r\n observed_index.append(i)\r\n for index in reversed(observed_index):\r\n self.observable_frontiers.pop(index)\r\n\r\n if new_frontiers != []:\r\n dist_list = np.linalg.norm(new_frontiers - self.coords, axis=-1)\r\n new_frontiers_in_range = new_frontiers[dist_list < self.sensor_range - 10]\r\n for point in new_frontiers_in_range:\r\n collision = self.check_collision(self.coords, point, robot_belief)\r\n if not collision:\r\n self.observable_frontiers.append(point)\r\n self.utility = self.get_node_utility()\r\n if self.utility < 5:\r\n self.utility = 0\r\n if self.utility == 0:\r\n self.zero_utility_node = True\r\n else:\r\n self.zero_utility_node = False\r\n\r\n def set_visited(self):\r\n self.observable_frontiers = []\r\n self.utility = 0\r\n self.zero_utility_node = True\r\n\r\n def check_collision(self, start, end, robot_belief):\r\n # Bresenham line algorithm checking\r\n collision = False\r\n\r\n x0 = start[0].round()\r\n y0 = start[1].round()\r\n x1 = end[0].round()\r\n y1 = end[1].round()\r\n dx, dy = abs(x1 - x0), abs(y1 - y0)\r\n x, y = x0, y0\r\n error = dx - dy\r\n x_inc = 1 if x1 > x0 else -1\r\n y_inc = 1 if y1 > y0 else -1\r\n dx *= 2\r\n dy *= 2\r\n\r\n while 0 <= x < robot_belief.shape[1] and 0 <= y < robot_belief.shape[0]:\r\n k = robot_belief.item(int(y), int(x))\r\n if x == x1 and y == y1:\r\n break\r\n if k == 1:\r\n collision = True\r\n break\r\n if k == 127:\r\n collision = True\r\n break\r\n if error > 0:\r\n x += x_inc\r\n error -= dy\r\n else:\r\n y += y_inc\r\n error += dx\r\n return collision\r"
},
{
"identifier": "Graph",
"path": "graph.py",
"snippet": "class Graph:\r\n def __init__(self):\r\n self.nodes = set()\r\n self.edges = dict()\r\n\r\n def add_node(self, node):\r\n self.nodes.add(node)\r\n\r\n def add_edge(self, from_node, to_node, length):\r\n edge = Edge(to_node, length)\r\n # edge = to_node\r\n if from_node in self.edges:\r\n from_node_edges = self.edges[from_node]\r\n else:\r\n self.edges[from_node] = dict()\r\n from_node_edges = self.edges[from_node]\r\n\r\n from_node_edges[to_node] = edge\r\n\r\n def clear_edge(self, from_node):\r\n if from_node in self.edges:\r\n self.edges[from_node] = dict()\r"
},
{
"identifier": "a_star",
"path": "graph.py",
"snippet": "def a_star(start, destination, node_coords, graph):\r\n if start == destination:\r\n return [], 0\r\n if str(destination) in graph.edges[str(start)].keys():\r\n cost = graph.edges[str(start)][str(destination)].length\r\n return [start, destination], cost\r\n open_list = {start}\r\n closed_list = set([])\r\n\r\n g = {start: 0}\r\n parents = {start: start}\r\n\r\n while len(open_list) > 0:\r\n n = None\r\n h_n = 1e5\r\n # print('open list', open_list)\r\n for v in open_list:\r\n h_v = h(v, destination, node_coords)\r\n if n is not None:\r\n h_n = h(n, destination, node_coords)\r\n if n is None or g[v] + h_v < g[n] + h_n:\r\n n = v\r\n\r\n if n is None:\r\n print('Path does not exist!')\r\n return None, 1e5\r\n\r\n if n == destination:\r\n reconst_path = []\r\n while parents[n] != n:\r\n reconst_path.append(n)\r\n n = parents[n]\r\n reconst_path.append(start)\r\n reconst_path.reverse()\r\n # print('Path found: {}'.format(reconst_path))\r\n # print(g[destination])\r\n return reconst_path, g[destination]\r\n\r\n for edge in graph.edges[str(n)].values():\r\n m = int(edge.to_node)\r\n cost = edge.length\r\n # print(m, cost)\r\n if m not in open_list and m not in closed_list:\r\n open_list.add(m)\r\n parents[m] = n\r\n g[m] = g[n] + cost\r\n\r\n else:\r\n if g[m] > g[n] + cost:\r\n g[m] = g[n] + cost\r\n parents[m] = n\r\n\r\n if m in closed_list:\r\n closed_list.remove(m)\r\n open_list.add(m)\r\n\r\n open_list.remove(n)\r\n closed_list.add(n)\r\n\r\n print('Path does not exist!')\r\n return None, 1e5\r"
}
] | import numpy as np
import copy
from sklearn.neighbors import NearestNeighbors
from parameter import *
from node import Node
from graph import Graph, a_star
| 4,144 | x = self.node_coords[:, 0] + self.node_coords[:, 1] * 1j
for node in self.route_node:
index = np.argwhere(x.reshape(-1) == node[0] + node[1] * 1j)
self.indicator[index] = 1
return self.node_coords, self.graph.edges, self.node_utility, self.indicator, self.direction_vector
def generate_uniform_points(self):
x = np.linspace(0, self.map_x - 1, 30).round().astype(int)
y = np.linspace(0, self.map_y - 1, 30).round().astype(int)
t1, t2 = np.meshgrid(x, y)
points = np.vstack([t1.T.ravel(), t2.T.ravel()]).T
return points
def free_area(self, robot_belief):
# free area 255
index = np.where(robot_belief == 255)
free = np.asarray([index[1], index[0]]).T
return free
def unique_coords(self, coords):
x = coords[:, 0] + coords[:, 1] * 1j
indices = np.unique(x, return_index=True)[1]
coords = np.array([coords[idx] for idx in sorted(indices)])
return coords
def find_nearest_node_index(self, position):
index = np.argmin(np.linalg.norm(self.node_coords - position, axis=1))
return index
def find_index_from_coords(self, node_coords, p):
return np.argmin(np.linalg.norm(node_coords - p, axis=1))
def find_k_neighbor(self, coords, node_coords, robot_belief):
dist_list = np.linalg.norm((coords - node_coords), axis=-1)
sorted_index = np.argsort(dist_list)
k = 0
neighbor_index_list = []
while k < self.k_size and k < node_coords.shape[0]:
neighbor_index = sorted_index[k]
neighbor_index_list.append(neighbor_index)
start = coords
end = node_coords[neighbor_index]
if not self.check_collision(start, end, robot_belief):
a = str(self.find_index_from_coords(node_coords, start))
b = str(neighbor_index)
dist = np.linalg.norm(start - end)
self.graph.add_node(a)
self.graph.add_edge(a, b, dist)
# test
self.graph.add_node(b)
self.graph.add_edge(b, a, dist)
k += 1
return neighbor_index_list
def find_k_neighbor_all_nodes(self, node_coords, robot_belief, ground_truth=False):
X = node_coords
if len(node_coords) >= self.k_size:
knn = NearestNeighbors(n_neighbors=self.k_size)
else:
knn = NearestNeighbors(n_neighbors=len(node_coords))
knn.fit(X)
distances, indices = knn.kneighbors(X)
for i, p in enumerate(X):
for j, neighbour in enumerate(X[indices[i][:]]):
start = p
end = neighbour
if not self.check_collision(start, end, robot_belief):
a = str(self.find_index_from_coords(node_coords, p))
b = str(self.find_index_from_coords(node_coords, neighbour))
if not ground_truth:
self.graph.add_node(a)
self.graph.add_edge(a, b, distances[i, j])
if self.plot:
self.x.append([p[0], neighbour[0]])
self.y.append([p[1], neighbour[1]])
else:
self.ground_truth_graph.add_node(a)
self.ground_truth_graph.add_edge(a, b, distances[i, j])
def find_index_from_coords(self, node_coords, p):
return np.where(np.linalg.norm(node_coords - p, axis=1) < 1e-5)[0][0]
def check_collision(self, start, end, robot_belief):
# Bresenham line algorithm checking
collision = False
x0 = start[0].round()
y0 = start[1].round()
x1 = end[0].round()
y1 = end[1].round()
dx, dy = abs(x1 - x0), abs(y1 - y0)
x, y = x0, y0
error = dx - dy
x_inc = 1 if x1 > x0 else -1
y_inc = 1 if y1 > y0 else -1
dx *= 2
dy *= 2
while 0 <= x < robot_belief.shape[1] and 0 <= y < robot_belief.shape[0]:
k = robot_belief.item(int(y), int(x))
if x == x1 and y == y1:
break
if k == 1:
collision = True
break
if k == 127:
collision = True
break
if error > 0:
x += x_inc
error -= dy
else:
y += y_inc
error += dx
return collision
def find_shortest_path(self, current, destination, node_coords, graph):
start_node = str(self.find_index_from_coords(node_coords, current))
end_node = str(self.find_index_from_coords(node_coords, destination))
|
class Graph_generator:
def __init__(self, map_size, k_size, sensor_range, target_position, plot=False):
self.k_size = k_size
self.graph = Graph()
self.ground_truth_graph = Graph()
self.node_coords = None
self.plot = plot
self.x = []
self.y = []
self.map_x = map_size[1]
self.map_y = map_size[0]
self.uniform_points = self.generate_uniform_points()
self.sensor_range = sensor_range
self.route_node = []
self.nodes_list = []
self.node_utility = None
self.indicator = None
self.direction_vector = None
self.target_position = target_position
def generate_node_coords(self, robot_location, robot_belief):
free_area = self.free_area(robot_belief)
free_area_to_check = free_area[:, 0] + free_area[:, 1] * 1j
uniform_points_to_check = self.uniform_points[:, 0] + self.uniform_points[:, 1] * 1j
_, _, candidate_indices = np.intersect1d(free_area_to_check, uniform_points_to_check, return_indices=True)
node_coords = self.uniform_points[candidate_indices]
node_coords = np.concatenate((robot_location.reshape(1, 2), self.target_position.reshape(1, 2), node_coords))
return self.unique_coords(node_coords).reshape(-1, 2)
def edge_clear_all_nodes(self):
self.graph = Graph()
self.x = []
self.y = []
def edge_clear(self, coords):
node_index = str(self.find_index_from_coords(self.node_coords, coords))
self.graph.clear_edge(node_index)
def generate_graph(self, robot_location, ground_truth_belief, robot_belief, frontiers):
self.node_coords = self.generate_node_coords(robot_location, robot_belief)
self.ground_truth_node_coords = self.generate_node_coords(robot_location, ground_truth_belief)
self.find_k_neighbor_all_nodes(self.node_coords, robot_belief)
self.find_k_neighbor_all_nodes(self.ground_truth_node_coords, ground_truth_belief, ground_truth=True)
self.node_utility = []
self.direction_vector = []
for coords in self.node_coords:
node = Node(coords, frontiers, robot_belief, self.target_position)
self.nodes_list.append(node)
utility = node.utility
direction_vector = node.direction_vector
self.direction_vector.append(direction_vector)
self.node_utility.append(utility)
self.direction_vector = np.array(self.direction_vector)
self.node_utility = np.array(self.node_utility)
self.indicator = np.zeros((self.node_coords.shape[0], 1))
x = self.node_coords[:,0] + self.node_coords[:,1]*1j
for node in self.route_node:
index = np.argwhere(x.reshape(-1) == node[0]+node[1]*1j)[0]
self.indicator[index] = 1
return self.node_coords, self.graph.edges, self.node_utility, self.indicator, self.direction_vector
def update_graph(self, robot_position, robot_belief, old_robot_belief, frontiers, old_frontiers):
# add uniform points in the new free area to the node coords
new_free_area = self.free_area((robot_belief - old_robot_belief > 0) * 255)
free_area_to_check = new_free_area[:, 0] + new_free_area[:, 1] * 1j
uniform_points_to_check = self.uniform_points[:, 0] + self.uniform_points[:, 1] * 1j
_, _, candidate_indices = np.intersect1d(free_area_to_check, uniform_points_to_check, return_indices=True)
new_node_coords = self.uniform_points[candidate_indices]
old_node_coords = copy.deepcopy(self.node_coords)
self.node_coords = np.concatenate((self.node_coords, new_node_coords, self.target_position.reshape(1, 2)))
self.node_coords = self.unique_coords(self.node_coords).reshape(-1, 2)
self.edge_clear_all_nodes()
self.find_k_neighbor_all_nodes(self.node_coords, robot_belief)
# update the observable frontiers through the change of frontiers
old_frontiers_to_check = old_frontiers[:, 0] + old_frontiers[:, 1] * 1j
new_frontiers_to_check = frontiers[:, 0] + frontiers[:, 1] * 1j
observed_frontiers_index = np.where(
np.isin(old_frontiers_to_check, new_frontiers_to_check, assume_unique=True) == False)
new_frontiers_index = np.where(
np.isin(new_frontiers_to_check, old_frontiers_to_check, assume_unique=True) == False)
observed_frontiers = old_frontiers[observed_frontiers_index]
new_frontiers = frontiers[new_frontiers_index]
for node in self.nodes_list:
if np.linalg.norm(node.coords - robot_position) > 2 * self.sensor_range:
pass
elif node.zero_utility_node is True:
pass
else:
node.update_observable_frontiers(observed_frontiers, new_frontiers, robot_belief)
for new_coords in new_node_coords:
node = Node(new_coords, frontiers, robot_belief, self.target_position)
self.nodes_list.append(node)
self.direction_vector = []
self.node_utility = []
for i, coords in enumerate(self.node_coords):
utility = self.nodes_list[i].utility
node = Node(coords, frontiers, robot_belief, self.target_position)
self.node_utility.append(utility)
direction_vector = node.direction_vector
self.direction_vector.append(direction_vector)
self.direction_vector = np.array(self.direction_vector)
self.node_utility = np.array(self.node_utility)
self.indicator = np.zeros((self.node_coords.shape[0], 1))
x = self.node_coords[:, 0] + self.node_coords[:, 1] * 1j
for node in self.route_node:
index = np.argwhere(x.reshape(-1) == node[0] + node[1] * 1j)
self.indicator[index] = 1
return self.node_coords, self.graph.edges, self.node_utility, self.indicator, self.direction_vector
def generate_uniform_points(self):
x = np.linspace(0, self.map_x - 1, 30).round().astype(int)
y = np.linspace(0, self.map_y - 1, 30).round().astype(int)
t1, t2 = np.meshgrid(x, y)
points = np.vstack([t1.T.ravel(), t2.T.ravel()]).T
return points
def free_area(self, robot_belief):
# free area 255
index = np.where(robot_belief == 255)
free = np.asarray([index[1], index[0]]).T
return free
def unique_coords(self, coords):
x = coords[:, 0] + coords[:, 1] * 1j
indices = np.unique(x, return_index=True)[1]
coords = np.array([coords[idx] for idx in sorted(indices)])
return coords
def find_nearest_node_index(self, position):
index = np.argmin(np.linalg.norm(self.node_coords - position, axis=1))
return index
def find_index_from_coords(self, node_coords, p):
return np.argmin(np.linalg.norm(node_coords - p, axis=1))
def find_k_neighbor(self, coords, node_coords, robot_belief):
dist_list = np.linalg.norm((coords - node_coords), axis=-1)
sorted_index = np.argsort(dist_list)
k = 0
neighbor_index_list = []
while k < self.k_size and k < node_coords.shape[0]:
neighbor_index = sorted_index[k]
neighbor_index_list.append(neighbor_index)
start = coords
end = node_coords[neighbor_index]
if not self.check_collision(start, end, robot_belief):
a = str(self.find_index_from_coords(node_coords, start))
b = str(neighbor_index)
dist = np.linalg.norm(start - end)
self.graph.add_node(a)
self.graph.add_edge(a, b, dist)
# test
self.graph.add_node(b)
self.graph.add_edge(b, a, dist)
k += 1
return neighbor_index_list
def find_k_neighbor_all_nodes(self, node_coords, robot_belief, ground_truth=False):
X = node_coords
if len(node_coords) >= self.k_size:
knn = NearestNeighbors(n_neighbors=self.k_size)
else:
knn = NearestNeighbors(n_neighbors=len(node_coords))
knn.fit(X)
distances, indices = knn.kneighbors(X)
for i, p in enumerate(X):
for j, neighbour in enumerate(X[indices[i][:]]):
start = p
end = neighbour
if not self.check_collision(start, end, robot_belief):
a = str(self.find_index_from_coords(node_coords, p))
b = str(self.find_index_from_coords(node_coords, neighbour))
if not ground_truth:
self.graph.add_node(a)
self.graph.add_edge(a, b, distances[i, j])
if self.plot:
self.x.append([p[0], neighbour[0]])
self.y.append([p[1], neighbour[1]])
else:
self.ground_truth_graph.add_node(a)
self.ground_truth_graph.add_edge(a, b, distances[i, j])
def find_index_from_coords(self, node_coords, p):
return np.where(np.linalg.norm(node_coords - p, axis=1) < 1e-5)[0][0]
def check_collision(self, start, end, robot_belief):
# Bresenham line algorithm checking
collision = False
x0 = start[0].round()
y0 = start[1].round()
x1 = end[0].round()
y1 = end[1].round()
dx, dy = abs(x1 - x0), abs(y1 - y0)
x, y = x0, y0
error = dx - dy
x_inc = 1 if x1 > x0 else -1
y_inc = 1 if y1 > y0 else -1
dx *= 2
dy *= 2
while 0 <= x < robot_belief.shape[1] and 0 <= y < robot_belief.shape[0]:
k = robot_belief.item(int(y), int(x))
if x == x1 and y == y1:
break
if k == 1:
collision = True
break
if k == 127:
collision = True
break
if error > 0:
x += x_inc
error -= dy
else:
y += y_inc
error += dx
return collision
def find_shortest_path(self, current, destination, node_coords, graph):
start_node = str(self.find_index_from_coords(node_coords, current))
end_node = str(self.find_index_from_coords(node_coords, destination))
| route, dist = a_star(int(start_node), int(end_node), node_coords, graph)
| 2 | 2023-10-17 04:32:42+00:00 | 8k |
WestlakeIntelligentRobotics/ConsensusLLM-code | modules/experiment/vector2d_debate.py | [
{
"identifier": "Template",
"path": "modules/experiment/template.py",
"snippet": "class Template(ABC):\n \"\"\"\n A template class for designing and running experiments with multiple agents\n and rounds.\n\n This abstract class defines a template for designing experiments where \n multiple agents interact in multiple rounds. Subclasses must implement \n various methods to customize the behavior of the experiment, including \n generating questions, managing agents, updating experiment records, and \n performing post-processing.\n\n Attributes:\n _record (dict): A dictionary for recording experiment data.\n _n_agent (int): Number of agents participating in the experiment.\n _n_round (int): Number of rounds in the experiment.\n _n_experiment (int): Number of independent experiments to run.\n _lock (threading.Lock):\n A lock for ensuring thread safety during data updates.\n\n Subclasses should implement the following abstract methods:\n - _generate_question\n - _generate_agents\n - _update_record\n - _round_postprocess\n - _exp_postprocess\n\n Public Methods:\n - run: Run the experiment using a thread pool for concurrency.\n - save_record: Save the experiment record to a file.\n\n To use this template, create a subclass that defines the specific behavior\n of the experiment.\n \"\"\"\n def __init__(self, args):\n \"\"\"\n Initialize the Template with provided arguments.\n\n Initializes instance variables for managing the experiment.\n \"\"\"\n self._record = {} # A dictionary for recording data\n self._n_agent = args.agents # Number of agents\n self._n_round = args.rounds # Number of rounds\n self._n_experiment = args.n_exp # Number of experiments\n self._lock = threading.Lock() # Lock for thread safety\n\n @abstractmethod\n def _generate_question(self, agent, round) -> str:\n \"\"\"\n Generate a question for an agent in a specific round.\n\n Args:\n agent: An agent participating in the experiment.\n round: The current round of the experiment.\n\n Returns:\n str: The generated question.\n \"\"\"\n pass\n\n @abstractmethod\n def _generate_agents(self, simulation_ind):\n \"\"\"\n Generate a set of agents for a simulation.\n\n Args:\n simulation_ind: Index of the current simulation.\n\n Returns:\n list: A list of agent objects.\n \"\"\"\n pass\n\n @abstractmethod\n def _update_record(self, record, agent_contexts, simulation_ind, agents):\n \"\"\"\n Update the experiment record based on agent data.\n\n Args:\n record: The experiment record to be updated.\n agent_contexts: List of agent histories and data.\n simulation_ind: Index of the current simulation.\n agents: List of agents participating in the experiment.\n \"\"\"\n pass\n\n @abstractmethod\n def _round_postprocess(self, simulation_ind, round, results, agents):\n \"\"\"\n Perform post-processing for a round of the experiment.\n\n Args:\n simulation_ind: Index of the current simulation.\n round: The current round of the experiment.\n results: List of results from agents.\n agents: List of agents participating in the experiment.\n \"\"\"\n pass\n\n @abstractmethod\n def _exp_postprocess(self):\n \"\"\"\n Perform post-processing for the entire experiment.\n \"\"\"\n pass\n\n def run(self):\n \"\"\"\n Run the experiment using a thread pool for concurrency.\n \"\"\"\n try:\n with ThreadPoolExecutor(max_workers=self._n_experiment) as executor:\n progress = tqdm(total=self._n_experiment * self._n_round, \n desc=\"Processing\", dynamic_ncols=True)\n futures = {executor.submit(self._run_once, sim_ind, progress) \n for sim_ind in range(self._n_experiment)}\n\n for future in as_completed(futures):\n if future.exception() is not None:\n print(\"A thread raised an exception: \"\n f\"{future.exception()}\")\n progress.close()\n except Exception as e:\n print(f\"An exception occurred: {e}\")\n finally:\n self._exp_postprocess()\n\n def _run_once(self, simulation_ind, progress):\n \"\"\"\n Run a single simulation of the experiment.\n\n Args:\n simulation_ind: Index of the current simulation.\n progress: Progress bar for tracking the simulation's progress.\n \"\"\"\n agents = self._generate_agents(simulation_ind)\n try:\n for round in range(self._n_round):\n results = queue.Queue()\n n_thread = len(agents) if round < 4 else 1\n with ThreadPoolExecutor(n_thread) as agent_executor:\n futures = []\n for agent_ind, agent in enumerate(agents):\n question = self. _generate_question(agent, round)\n futures.append(agent_executor\n .submit(agent.answer, question, \n agent_ind, round, \n simulation_ind))\n\n for ind, future in enumerate(as_completed(futures)):\n if future.exception() is not None:\n print(\"A thread raised an exception: \"\n f\"{future.exception()}\")\n else:\n idx, result = future.result()\n results.put((idx, result))\n results = list(results.queue)\n results = sorted(results, key=lambda x: x[0])\n progress.update(1)\n self._round_postprocess(simulation_ind, round, results, agents)\n\n except Exception as e:\n print(f\"error:{e}\")\n finally:\n agent_contexts = [agent.get_history() for agent in agents]\n with self._lock:\n self._update_record(self._record, agent_contexts, \n simulation_ind, agents)\n\n def save_record(self, output_dir: str):\n \"\"\"\n Save the experiment record to a file.\n\n Args:\n output_dir: The directory where the record will be saved.\n\n Returns:\n Tuple: A tuple with a success indicator and the file path.\n \"\"\"\n try:\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n data_file = output_dir + '/data.p'\n # Save the record to a pickle file\n pickle.dump(self._record, open(data_file, \"wb\"))\n return True, data_file\n except Exception as e:\n print(f\"An exception occurred while saving the file: {e}\")\n print(\"Saving to the current directory instead.\")\n # Backup in case of an exception\n pickle.dump(self._record, open(\"backup_output_file.p\", \"wb\"))\n return False, \"\""
},
{
"identifier": "Agent2D",
"path": "modules/llm/agent_2d.py",
"snippet": "class Agent2D(GPT):\n \"\"\"\n A class representing a 2D agent with position control.\n\n Args:\n position (tuple): Current position of the agent (x, y).\n other_position (list of tuples): Positions of other agents.\n key (str): API key for the GPT model.\n name (str): Name of the agent (optional).\n model (str): GPT model name (default is 'gpt-3.5-turbo-0613').\n temperature (float): \n GPT temperature for text generation (default is 0.7).\n keep_memory (bool): \n Whether to keep a memory of conversations (default is False).\n \"\"\"\n \n def __init__(self, position, other_position, key: str, name=None,\n model: str = 'gpt-3.5-turbo-0613', temperature: float = 0.7, \n keep_memory=False):\n super().__init__(key=key, model=model, temperature=temperature, \n keep_memory=keep_memory)\n self._name = name\n self._velocity = np.zeros(2) # Current velocity of the agent\n self._max_traction_force = 50 # Maximum traction force of the agent (N)\n self._max_velocity = 3 # Maximum velocity of the agent (m/s)\n self._m = 15 # Mass of the agent (kg)\n self._mu = 0.02 # Friction coefficient\n # PID Parameters\n self.Kp = np.array([1.2, 1.2], dtype=np.float64)\n self.Ki = np.array([0.0, 0.0], dtype=np.float64)\n self.Kd = np.array([6, 6], dtype=np.float64)\n self.prev_error = np.array([0, 0], dtype=np.float64)\n self.integral = np.array([0, 0], dtype=np.float64)\n self._target_position = None # Target position of the agent\n self._position = position # Current position of the agent\n self._other_position = other_position # Positions of other agents\n self._trajectory = [] # Record the agent's movement trajectory\n self._target_trajectory = [] # Record the agent's target trajectory\n self._summarizer = GPT(key=key, model=\"gpt-3.5-turbo-0613\", \n keep_memory=False)\n self._summarize_result = \"\"\n self._summarizer_descriptions = summarizer_output_form\n self._summarizer.memories_update(role='system', content=summarizer_role)\n\n @property\n def name(self):\n return self._name\n\n @property\n def position(self):\n return self._position\n\n @position.setter\n def position(self, value):\n self._position = value\n\n @property\n def other_position(self):\n return self._other_position\n\n @property\n def trajectory(self):\n return self._trajectory\n\n @property\n def target_trajectory(self):\n return self._target_trajectory\n\n @property\n def target_position(self):\n return self._target_position\n\n @other_position.setter\n def other_position(self, value):\n self._other_position = value\n\n @property\n def summarize_result(self):\n return self._summarize_result\n\n def answer(self, input, idx, round, simulation_ind, try_times=0) -> tuple:\n \"\"\"\n Generate an answer using the GPT model.\n\n Args:\n input (str): Input text or prompt.\n idx: Index.\n round: Round.\n simulation_ind: Simulation index.\n try_times (int): Number of times the answer generation is attempted.\n\n Returns:\n tuple: Index and the target position (x, y).\n \"\"\"\n try:\n answer = self.generate_answer(input=input, try_times=try_times)\n self._target_position = self.parse_output(answer)\n self._target_trajectory.append(self._target_position)\n return idx, self._target_position\n except Exception as e:\n try_times += 1\n if try_times < 3:\n print(f\"An error occurred when agent {self._name} tried to \"\n f\"generate answers: {e},try_times: {try_times + 1}/3.\")\n return self.answer(input=input, idx=idx, round=round, \n simulation_ind=simulation_ind, \n try_times=try_times)\n else:\n print(\"After three attempts, the error still remains \"\n f\"unresolved, the input is:\\n'{input}'\\n.\")\n return idx, self._target_position\n\n def summarize(self, agent_answers):\n \"\"\"\n Generate a summary of agent answers.\n\n Args:\n agent_answers (list): List of agent answers.\n \"\"\"\n if len(agent_answers) == 0:\n self._summarize_result = \"\"\n else:\n self._summarize_result = self._summarizer.generate_answer(\n self._summarizer_descriptions.format(agent_answers))\n\n def parse_output(self, output):\n \"\"\"\n Parse the output for visualization.\n\n Args:\n output (str): Model's output.\n\n Returns:\n tuple: Parsed position value (x, y).\n \"\"\"\n matches = re.findall(r'\\((.*?)\\)', output)\n if matches:\n last_match = matches[-1]\n numbers = re.findall(r'[-+]?\\d*\\.\\d+|\\d+', last_match)\n if len(numbers) == 2:\n x = float(numbers[0])\n y = float(numbers[1])\n return (x, y)\n else:\n raise ValueError(f\"The last match {last_match} does \"\n \"not contain exactly 2 numbers.\")\n else:\n raise ValueError(f\"No array found in the output: \\n{output}\")\n\n def move(self, time_duration: float):\n \"\"\"\n Move the agent based on PID control.\n\n Args:\n time_duration (float): Time duration for the movement.\n \"\"\"\n if self._target_position is None:\n print(\"Target not set!\")\n return\n error = np.array(self._target_position) - np.array(self._position)\n self.integral += error * time_duration\n derivative = (error - self.prev_error) / time_duration\n force = self.Kp * error + self.Ki * self.integral + self.Kd * derivative\n force_magnitude = np.linalg.norm(force)\n if force_magnitude > self._max_traction_force:\n force = (force / force_magnitude) * self._max_traction_force\n # friction_force = -self._mu * self._m * 9.8 * np.sign(self._velocity) if abs(\n # np.linalg.norm(self._velocity)) > 0.1 else 0\n friction_force = 0\n net_force = force + friction_force\n acceleration = net_force / self._m\n self._velocity += acceleration * time_duration\n # Limit the velocity\n velocity_magnitude = np.linalg.norm(self._velocity)\n if velocity_magnitude > self._max_velocity:\n self._velocity = (self._velocity / velocity_magnitude) * self._max_velocity\n self._position += self._velocity * time_duration + 0.5 * acceleration * time_duration ** 2\n self._position = tuple(np.round(self._position, 2))\n self.prev_error = error\n self._trajectory.append(self._position)\n # print(f\"{self._name} position: {self._position}, \"\n # f\"target: {self._target_position}, velocity: {self._velocity}, \"\n # f\"force: {force}, friction_force: {friction_force}, \"\n # f\"net_force: {net_force}, acceleration: {acceleration}\")\n return self._position"
},
{
"identifier": "api_keys",
"path": "modules/llm/api_key.py",
"snippet": ""
},
{
"identifier": "names",
"path": "modules/llm/role.py",
"snippet": ""
},
{
"identifier": "agent_output_form",
"path": "modules/prompt/form.py",
"snippet": ""
},
{
"identifier": "stubborn",
"path": "modules/prompt/personality.py",
"snippet": ""
},
{
"identifier": "agent_role",
"path": "modules/prompt/scenario_2d.py",
"snippet": ""
},
{
"identifier": "gen_html",
"path": "modules/visual/gen_html.py",
"snippet": "def gen_html(data_path, html_dir):\n \"\"\"\n Generate HTML output for conversations.\n\n Args:\n data_path (str): The path to the data file.\n html_dir (str): The directory to save the generated HTML files.\n\n Generates HTML output for the conversations and saves them in the \n specified directory.\n \"\"\"\n results = read_conversations(data_path)\n\n for ind, res in enumerate(results):\n output_file = os.path.join(html_dir, f'simulation_{ind}.html')\n if os.path.exists(output_file):\n continue\n try:\n render_conversations_to_html(res, output_file, ind)\n print(f'HTML output has been written to {output_file}')\n except:\n continue"
},
{
"identifier": "plot_xy",
"path": "modules/visual/plot_2d.py",
"snippet": "def plot_xy(data_path):\n \"\"\"\n Plot the x and y coordinates of robots' trajectories.\n\n Args:\n data_path (str): The path to the data file containing trajectory data.\n \"\"\"\n data = read_from_file(data_path)\n all_positions = np.array(data['pos'][0])\n all_targets = np.array(data['target'][0])\n\n num_robots, num_points, _ = all_positions.shape\n num_targets = all_targets.shape[1]\n\n multiple = num_points // num_targets\n replicated_targets = np.repeat(all_targets, multiple, axis=1)\n\n round_time = np.arange(0, num_points * 0.1, 0.1)\n\n # Create subplots for each robot's trajectory\n fig, axs = plt.subplots(2, num_robots, figsize=(9, 4))\n coord_labels = ['x', 'y']\n\n for i in range(num_robots):\n for j, coord in enumerate(coord_labels):\n axs[j, i].set_xlim(0, 40)\n axs[j, i].tick_params(axis='both', labelsize=7)\n axs[j, i].plot(round_time, all_positions[i, :, j],\n color=colors[i], linestyle='-', linewidth=1,\n label=\"Actual\")\n axs[j, i].plot(round_time, replicated_targets[i, :, j], \n color=colors[i], linestyle='--', linewidth=1,\n label=\"Planned\")\n axs[j, i].set_title(f\"Robot {i + 1}\", fontsize=9)\n if i == 0:\n axs[j, i].set_ylabel(coord + ' (m)', fontsize=9)\n if coord == 'x':\n axs[j, i].legend(fontsize=7)\n\n plt.tight_layout()\n plt.savefig(os.path.join(os.path.dirname(data_path), 'trajectory.svg'))\n plt.show()"
},
{
"identifier": "video",
"path": "modules/visual/plot_2d.py",
"snippet": "def video(data_path):\n \"\"\"\n Create an animation of robot trajectories.\n\n Args:\n data_path (str): The path to the data file containing trajectory data.\n \"\"\"\n data = read_from_file(data_path)\n fig, ax = plt.subplots(figsize=(8, 4))\n lines = []\n dashed_lines = []\n scatters = []\n start_scatters = []\n\n for idx in range(len(data['pos'][0])):\n line, = ax.plot([], [], lw=2, color=colors[idx], \n label=f'Robot {idx + 1} trajectory')\n dashed_line, = ax.plot([], [], lw=2, linestyle='--', \n alpha=0.5, color=colors[idx])\n scatter = ax.scatter([], [], marker='o', \n c=colors[idx].reshape(1, -1), s=50)\n start_pos = data['pos'][0][idx][0]\n start_scatter = ax.scatter(start_pos[0], start_pos[1], alpha=0.5, \n c=colors[idx].reshape(1, -1), s=100,\n marker='o', \n label=f'Robot {idx + 1} initial position')\n lines.append(line)\n dashed_lines.append(dashed_line)\n scatters.append(scatter)\n start_scatters.append(start_scatter)\n mean_start_x = np.array([data['pos'][0][idx][0][0]\n for idx in range(len(data['pos'][0]))]).mean()\n mean_start_y = np.array([data['pos'][0][idx][0][1] \n for idx in range(len(data['pos'][0]))]).mean()\n mean_start_scatter = ax.scatter([], [], c=colors[-1].reshape(1, -1), \n marker='$*$', s=100, \n label=\"Average initial position\")\n mean_start_scatter.set_offsets([mean_start_x, mean_start_y])\n\n def init():\n ax.set_xlabel('x (m)')\n ax.set_ylabel('y (m)')\n ax.set_ylim(0, 80)\n ax.set_xticks(range(-20, 130, 10))\n for line, dashed_line, scatter in zip(lines, dashed_lines, scatters):\n line.set_data([], [])\n dashed_line.set_data([], [])\n scatter.set_offsets(np.empty((0, 2)))\n handles, labels = ax.get_legend_handles_labels()\n ax.legend(handles=handles, labels=labels, loc=\"upper left\", \n labelspacing=0.6, fontsize=10)\n return lines + dashed_lines + scatters + start_scatters\n\n def animate(i):\n for idx, (line, dashed_line, scatter) in enumerate(zip(lines, dashed_lines, scatters)):\n all_positions = []\n for key in data['pos']:\n all_positions.extend(data['pos'][key][idx])\n line.set_data([x for x, y in all_positions[:i + 1]], \n [y for x, y in all_positions[:i + 1]])\n target_key = max(0, i - 20) // 20\n start_x, start_y = all_positions[i]\n target_x, target_y = data['target'][0][idx][target_key]\n dashed_line.set_data([start_x, target_x], [start_y, target_y])\n scatter.set_offsets([start_x, start_y])\n\n if i == len(data['pos'][0][0]) - 1:\n img_output_path = os.path.join(os.path.dirname(data_path), \n 'last_frame.svg')\n plt.savefig(img_output_path, bbox_inches='tight')\n return lines + dashed_lines + scatters\n\n output_path = os.path.join(os.path.dirname(data_path), 'animation.gif')\n ani = FuncAnimation(fig, animate, frames=len(data['pos'][0][0]), \n init_func=init, blit=False)\n ani.save(output_path, fps=20)\n plt.show()"
}
] | import numpy as np
import pickle
from .template import Template
from ..llm.agent_2d import Agent2D
from ..llm.api_key import api_keys
from ..llm.role import names
from ..prompt.form import agent_output_form
from ..prompt.personality import stubborn, suggestible
from ..prompt.scenario_2d import agent_role, game_description, round_description
from ..visual.gen_html import gen_html
from ..visual.plot_2d import plot_xy, video | 6,461 |
class Vector2dDebate(Template):
"""
Vector2dDebate is a class that simulates a 2D debate scenario with multiple
agents.
This class provides the framework to conduct 2D debates with agents and
record their trajectories.
Args:
args:
An object containing configuration parameters for the debate
simulation.
connectivity_matrix:
A square matrix defining agent knowledge connectivity.
Raises:
ValueError:
If the sum of stubborn and suggestible agents exceeds the total
number of agents,
if there are insufficient API keys for the agents, or if the
connectivity matrix is not appropriate.
"""
def __init__(self, args, connectivity_matrix):
"""
Initialize the Vector2dDebate instance.
Args:
args: An object containing configuration options.
connectivity_matrix: A matrix defining agent knowledge connectivity.
Raises:
ValueError: If the input parameters are invalid.
"""
super().__init__(args)
self._dt = 0.1
self._n_agents = args.agents
self._init_input = game_description + "\n\n" + agent_output_form
self._round_description = round_description
self._positions = [[]] * args.n_exp
self._output_file = args.out_file
self._n_suggestible = args.n_suggestible
self._n_stubborn = args.n_stubborn
self._trajectory = {"pos": {}, "target": {}} # A dictionary for recording agent trajectories
# np.random.seed(0)
# Define the connectivity matrix for agent knowledge
# m(i, j) = 1 means agent i knows the position of agent j
self._m = connectivity_matrix
# Safety checks for input parameters
if args.n_stubborn + args.n_suggestible > self._n_agents:
raise ValueError("stubborn + suggestible agents is more than "
f"{self._n_agents}")
if len(api_keys) < self._n_agents * args.n_exp:
raise ValueError("api_keys are not enough for "
f"{self._n_agents} agents")
if self._m.shape[0] != self._m.shape[1]:
raise ValueError("connectivity_matrix is not a square matrix, "
f"shape: {self._m.shape}")
if self._m.shape[0] != self._n_agents:
raise ValueError("connectivity_matrix is not enough for "
f"{self._n_agents} agents, shape: {self._m.shape}")
def _generate_agents(self, simulation_ind):
"""Generate agent instances for the simulation.
Args:
simulation_ind: Index of the simulation.
Returns:
List of Agent2D instances.
"""
agents = []
position = (np.array([[20, 20], [80, 20], [50, 80]])
+ np.random.randint(-10, 10, size=(self._n_agents, 2)))
for idx in range(self._n_agents):
position_others = [(x, y) for x, y in position[self._m[idx, :]]]
agent = Agent2D(position=tuple(position[idx]),
other_position=position_others,
key=api_keys[simulation_ind * self._n_agents + idx],
model="gpt-3.5-turbo-0613",
name=names[idx])
# add personality, neutral by default
personality = ""
if idx < self._n_stubborn:
personality = stubborn
elif (self._n_stubborn <= idx
< self._n_stubborn + self._n_suggestible):
personality = suggestible
agent.memories_update(role='system',
content=agent_role + personality)
agents.append(agent)
self._positions[simulation_ind] = position
return agents
def _generate_question(self, agent, round) -> str:
"""Generate a question for an agent in a round.
Args:
agent: An Agent2D instance.
round: The current round.
Returns:
A formatted string containing the question.
"""
input = self._init_input.format(agent.position, agent.other_position)
return input
def _exp_postprocess(self):
"""Post-process the experiment data, including saving and
generating visualizations."""
is_success, filename = self.save_record(self._output_file)
if is_success:
# Call functions to plot and generate HTML
trajectory_file = self._output_file + '/trajectory.p'
plot_xy(trajectory_file)
video(trajectory_file)
| """
MIT License
Copyright (c) [2023] [Intelligent Unmanned Systems Laboratory at
Westlake University]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS," WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM,
OUT OF, OR IN CONNECTION WITH THE SOFTWARE OR THE USE, OR OTHER DEALINGS IN
THE SOFTWARE.
"""
class Vector2dDebate(Template):
"""
Vector2dDebate is a class that simulates a 2D debate scenario with multiple
agents.
This class provides the framework to conduct 2D debates with agents and
record their trajectories.
Args:
args:
An object containing configuration parameters for the debate
simulation.
connectivity_matrix:
A square matrix defining agent knowledge connectivity.
Raises:
ValueError:
If the sum of stubborn and suggestible agents exceeds the total
number of agents,
if there are insufficient API keys for the agents, or if the
connectivity matrix is not appropriate.
"""
def __init__(self, args, connectivity_matrix):
"""
Initialize the Vector2dDebate instance.
Args:
args: An object containing configuration options.
connectivity_matrix: A matrix defining agent knowledge connectivity.
Raises:
ValueError: If the input parameters are invalid.
"""
super().__init__(args)
self._dt = 0.1
self._n_agents = args.agents
self._init_input = game_description + "\n\n" + agent_output_form
self._round_description = round_description
self._positions = [[]] * args.n_exp
self._output_file = args.out_file
self._n_suggestible = args.n_suggestible
self._n_stubborn = args.n_stubborn
self._trajectory = {"pos": {}, "target": {}} # A dictionary for recording agent trajectories
# np.random.seed(0)
# Define the connectivity matrix for agent knowledge
# m(i, j) = 1 means agent i knows the position of agent j
self._m = connectivity_matrix
# Safety checks for input parameters
if args.n_stubborn + args.n_suggestible > self._n_agents:
raise ValueError("stubborn + suggestible agents is more than "
f"{self._n_agents}")
if len(api_keys) < self._n_agents * args.n_exp:
raise ValueError("api_keys are not enough for "
f"{self._n_agents} agents")
if self._m.shape[0] != self._m.shape[1]:
raise ValueError("connectivity_matrix is not a square matrix, "
f"shape: {self._m.shape}")
if self._m.shape[0] != self._n_agents:
raise ValueError("connectivity_matrix is not enough for "
f"{self._n_agents} agents, shape: {self._m.shape}")
def _generate_agents(self, simulation_ind):
"""Generate agent instances for the simulation.
Args:
simulation_ind: Index of the simulation.
Returns:
List of Agent2D instances.
"""
agents = []
position = (np.array([[20, 20], [80, 20], [50, 80]])
+ np.random.randint(-10, 10, size=(self._n_agents, 2)))
for idx in range(self._n_agents):
position_others = [(x, y) for x, y in position[self._m[idx, :]]]
agent = Agent2D(position=tuple(position[idx]),
other_position=position_others,
key=api_keys[simulation_ind * self._n_agents + idx],
model="gpt-3.5-turbo-0613",
name=names[idx])
# add personality, neutral by default
personality = ""
if idx < self._n_stubborn:
personality = stubborn
elif (self._n_stubborn <= idx
< self._n_stubborn + self._n_suggestible):
personality = suggestible
agent.memories_update(role='system',
content=agent_role + personality)
agents.append(agent)
self._positions[simulation_ind] = position
return agents
def _generate_question(self, agent, round) -> str:
"""Generate a question for an agent in a round.
Args:
agent: An Agent2D instance.
round: The current round.
Returns:
A formatted string containing the question.
"""
input = self._init_input.format(agent.position, agent.other_position)
return input
def _exp_postprocess(self):
"""Post-process the experiment data, including saving and
generating visualizations."""
is_success, filename = self.save_record(self._output_file)
if is_success:
# Call functions to plot and generate HTML
trajectory_file = self._output_file + '/trajectory.p'
plot_xy(trajectory_file)
video(trajectory_file) | gen_html(filename, self._output_file) | 7 | 2023-10-20 07:58:07+00:00 | 8k |
inngest/inngest-py | inngest/flask.py | [
{
"identifier": "client_lib",
"path": "inngest/_internal/client_lib.py",
"snippet": "_DEV_SERVER_EVENT_KEY = \"NO_EVENT_KEY_SET\"\nclass Inngest:\n def api_origin(self) -> str:\n def event_api_origin(self) -> str:\n def event_key(self) -> str | None:\n def signing_key(self) -> str | None:\n def __init__(\n self,\n *,\n api_base_url: str | None = None,\n app_id: str,\n event_api_base_url: str | None = None,\n event_key: str | None = None,\n is_production: bool | None = None,\n logger: types.Logger | None = None,\n middleware: list[\n type[middleware_lib.Middleware | middleware_lib.MiddlewareSync]\n ]\n | None = None,\n signing_key: str | None = None,\n ) -> None:\n def _build_send_request(\n self,\n events: list[event_lib.Event],\n ) -> types.MaybeError[httpx.Request]:\n def add_middleware(\n self,\n middleware: type[\n middleware_lib.Middleware | middleware_lib.MiddlewareSync\n ],\n ) -> None:\n def create_function(\n self,\n *,\n batch_events: function_config.Batch | None = None,\n cancel: list[function_config.Cancel] | None = None,\n debounce: function_config.Debounce | None = None,\n fn_id: str,\n middleware: list[\n type[middleware_lib.Middleware | middleware_lib.MiddlewareSync]\n ]\n | None = None,\n name: str | None = None,\n on_failure: function.FunctionHandlerAsync\n | function.FunctionHandlerSync\n | None = None,\n rate_limit: function_config.RateLimit | None = None,\n retries: int | None = None,\n throttle: function_config.Throttle | None = None,\n trigger: function_config.TriggerCron | function_config.TriggerEvent,\n ) -> typing.Callable[\n def decorator(\n func: function.FunctionHandlerAsync | function.FunctionHandlerSync,\n ) -> function.Function:\n async def send(\n self,\n events: event_lib.Event | list[event_lib.Event],\n ) -> list[str]:\n def send_sync(\n self,\n events: event_lib.Event | list[event_lib.Event],\n ) -> list[str]:\n def set_logger(self, logger: types.Logger) -> None:\ndef _extract_ids(body: object) -> list[str]:"
},
{
"identifier": "comm",
"path": "inngest/_internal/comm.py",
"snippet": "class CommResponse:\nclass CommHandler:\n def __init__(\n self,\n *,\n body: object = None,\n headers: dict[str, str] | None = None,\n status_code: int = http.HTTPStatus.OK.value,\n ) -> None:\n def from_call_result(\n cls,\n logger: types.Logger,\n call_res: execution.CallResult,\n ) -> CommResponse:\n def from_error(\n cls,\n logger: types.Logger,\n err: Exception,\n ) -> CommResponse:\n def __init__(\n self,\n *,\n api_base_url: str | None = None,\n client: client_lib.Inngest,\n framework: const.Framework,\n functions: list[function.Function],\n signing_key: str | None = None,\n ) -> None:\n def _build_registration_request(\n self,\n app_url: str,\n server_kind: const.ServerKind | None,\n ) -> types.MaybeError[httpx.Request]:\n async def call_function(\n self,\n *,\n call: execution.Call,\n fn_id: str,\n req_sig: net.RequestSignature,\n target_hashed_id: str,\n ) -> CommResponse:\n def call_function_sync(\n self,\n *,\n call: execution.Call,\n fn_id: str,\n req_sig: net.RequestSignature,\n target_hashed_id: str,\n ) -> CommResponse:\n def _get_function(self, fn_id: str) -> types.MaybeError[function.Function]:\n def get_function_configs(\n self,\n app_url: str,\n ) -> types.MaybeError[list[function_config.FunctionConfig]]:\n def inspect(self, server_kind: const.ServerKind | None) -> CommResponse:\n def _parse_registration_response(\n self,\n server_res: httpx.Response,\n server_kind: const.ServerKind | None,\n ) -> CommResponse:\n async def register(\n self,\n *,\n app_url: str,\n server_kind: const.ServerKind | None,\n ) -> CommResponse:\n def register_sync(\n self,\n *,\n app_url: str,\n server_kind: const.ServerKind | None,\n ) -> CommResponse:\n async def _respond(\n self,\n middleware: middleware_lib.MiddlewareManager,\n value: execution.CallResult | Exception,\n ) -> CommResponse:\n def _respond_sync(\n self,\n middleware: middleware_lib.MiddlewareManager,\n value: execution.CallResult | Exception,\n ) -> CommResponse:\n def _validate_registration(\n self,\n server_kind: const.ServerKind | None,\n ) -> types.MaybeError[None]:"
},
{
"identifier": "const",
"path": "inngest/_internal/const.py",
"snippet": "DEFAULT_API_ORIGIN: typing.Final = \"https://api.inngest.com/\"\nDEFAULT_EVENT_ORIGIN: typing.Final = \"https://inn.gs/\"\nDEV_SERVER_ORIGIN: typing.Final = \"http://127.0.0.1:8288/\"\nLANGUAGE: typing.Final = \"py\"\nROOT_STEP_ID: typing.Final = \"step\"\nVERSION: typing.Final = \"0.3.1\"\n API_BASE_URL = \"INNGEST_API_BASE_URL\"\n DEV = \"INNGEST_DEV\"\n EVENT_API_BASE_URL = \"INNGEST_EVENT_API_BASE_URL\"\n EVENT_KEY = \"INNGEST_EVENT_KEY\"\n SERVE_ORIGIN = \"INNGEST_SERVE_ORIGIN\"\n SERVE_PATH = \"INNGEST_SERVE_PATH\"\n SIGNING_KEY = \"INNGEST_SIGNING_KEY\"\n DISALLOWED_REGISTRATION_INITIATOR = \"disallowed_registration_initiator\"\n INVALID_BASE_URL = \"invalid_base_url\"\n INVALID_BODY = \"invalid_body\"\n INVALID_FUNCTION_CONFIG = \"invalid_function_config\"\n INVALID_REQUEST_SIGNATURE = \"invalid_request_signature\"\n MISMATCHED_SYNC = \"mismatched_sync\"\n MISSING_EVENT_KEY = \"missing_event_key\"\n MISSING_FUNCTION = \"missing_function\"\n MISSING_HEADER = \"missing_header\"\n MISSING_SIGNING_KEY = \"missing_signing_key\"\n REGISTRATION_ERROR = \"registration_error\"\n UNEXPECTED_STEP = \"unexpected_step\"\n UNKNOWN = \"unknown\"\n UNSERIALIZABLE_OUTPUT = \"unserializable_output\"\n DJANGO = \"django\"\n FAST_API = \"fast_api\"\n FLASK = \"flask\"\n TORNADO = \"tornado\"\n CONTENT_TYPE = \"Content-Type\"\n EXPECTED_SERVER_KIND = \"X-Inngest-Expected-Server-Kind\"\n FRAMEWORK = \"X-Inngest-Framework\"\n NO_RETRY = \"X-Inngest-No-Retry\"\n SDK = \"X-Inngest-SDK\"\n SERVER_KIND = \"X-Inngest-Server-Kind\"\n SERVER_TIMING = \"Server-Timing\"\n SIGNATURE = \"X-Inngest-Signature\"\n USER_AGENT = \"User-Agent\"\n FUNCTION_ID = \"fnId\"\n STEP_ID = \"stepId\"\n FUNCTION_FAILED = \"inngest/function.failed\"\n CLOUD = \"cloud\"\n DEV_SERVER = \"dev\"\nclass EnvKey(enum.Enum):\nclass ErrorCode(enum.Enum):\nclass Framework(enum.Enum):\nclass HeaderKey(enum.Enum):\nclass QueryParamKey(enum.Enum):\nclass InternalEvents(enum.Enum):\nclass ServerKind(enum.Enum):"
},
{
"identifier": "errors",
"path": "inngest/_internal/errors.py",
"snippet": "class InternalError(Exception):\nclass DisallowedRegistrationError(InternalError):\nclass InvalidBaseURLError(InternalError):\nclass InvalidConfigError(InternalError):\nclass MismatchedSyncError(InternalError):\nclass InvalidRequestSignatureError(InternalError):\nclass InvalidBodyError(InternalError):\nclass MissingEventKeyError(InternalError):\nclass MissingFunctionError(InternalError):\nclass MissingHeaderError(InternalError):\nclass MissingParamError(InternalError):\nclass MissingSigningKeyError(InternalError):\nclass RegistrationError(InternalError):\nclass UnknownError(InternalError):\nclass UnexpectedStepError(InternalError):\nclass UnserializableOutputError(InternalError):\nclass ExternalError(Exception):\nclass NonRetriableError(ExternalError):\n def __init__(\n self, *, code: const.ErrorCode, message: str | None = None\n ) -> None:\n def __init__(self, message: str | None = None) -> None:\n def __init__(self, message: str | None = None) -> None:\n def __init__(self, message: str | None = None) -> None:\n def from_validation_error(\n cls,\n err: pydantic.ValidationError,\n ) -> InvalidConfigError:\n def __init__(self, message: str | None = None) -> None:\n def __init__(self, message: str | None = None) -> None:\n def __init__(self, message: str | None = None) -> None:\n def __init__(self, message: str | None = None) -> None:\n def __init__(self, message: str | None = None) -> None:\n def __init__(self, message: str | None = None) -> None:\n def __init__(self, message: str | None = None) -> None:\n def __init__(self, message: str | None = None) -> None:\n def __init__(self, message: str | None = None) -> None:\n def __init__(self, message: str | None = None) -> None:\n def __init__(self, message: str | None = None) -> None:\n def __init__(self, message: str | None = None) -> None:\n def __init__(\n self,\n message: str | None = None,\n cause: dict[str, object] | None = None,\n ) -> None:"
},
{
"identifier": "execution",
"path": "inngest/_internal/execution.py",
"snippet": "class Call(types.BaseModel):\nclass CallContext(types.BaseModel):\nclass CallStack(types.BaseModel):\nclass CallError(types.BaseModel):\nclass FunctionCallResponse(types.BaseModel):\nclass StepResponse(types.BaseModel):\nclass Output(types.BaseModel):\nclass Opcode(enum.Enum):\n def from_error(cls, err: Exception) -> CallError:\ndef is_step_call_responses(\n value: object,\n) -> typing.TypeGuard[list[StepResponse]]:\n INVOKE = \"InvokeFunction\"\n PLANNED = \"StepPlanned\"\n SLEEP = \"Sleep\"\n STEP = \"Step\"\n WAIT_FOR_EVENT = \"WaitForEvent\"\nUNSPECIFIED_STEP_ID = \"step\""
},
{
"identifier": "function",
"path": "inngest/_internal/function.py",
"snippet": "class Context:\nclass _Config:\nclass FunctionHandlerAsync(typing.Protocol):\nclass FunctionHandlerSync(typing.Protocol):\nclass FunctionOpts(types.BaseModel):\nclass Function:\nclass _UserError(Exception):\n def __call__(\n self,\n ctx: Context,\n step: step_lib.Step,\n ) -> typing.Awaitable[types.Serializable]:\n def __call__(\n self,\n ctx: Context,\n step: step_lib.StepSync,\n ) -> types.Serializable:\ndef _is_function_handler_async(\n value: FunctionHandlerAsync | FunctionHandlerSync,\n) -> typing.TypeGuard[FunctionHandlerAsync]:\ndef _is_function_handler_sync(\n value: FunctionHandlerAsync | FunctionHandlerSync,\n) -> typing.TypeGuard[FunctionHandlerSync]:\n def convert_validation_error(\n self,\n err: pydantic.ValidationError,\n ) -> BaseException:\n def id(self) -> str:\n def is_handler_async(self) -> bool:\n def is_on_failure_handler_async(self) -> bool | None:\n def on_failure_fn_id(self) -> str | None:\n def __init__(\n self,\n opts: FunctionOpts,\n trigger: function_config.TriggerCron | function_config.TriggerEvent,\n handler: FunctionHandlerAsync | FunctionHandlerSync,\n middleware: list[\n type[middleware_lib.Middleware | middleware_lib.MiddlewareSync]\n ]\n | None = None,\n ) -> None:\n async def call( # noqa: C901\n self,\n call: execution.Call,\n client: client_lib.Inngest,\n ctx: Context,\n fn_id: str,\n middleware: middleware_lib.MiddlewareManager,\n target_hashed_id: str | None,\n ) -> execution.CallResult:\n def call_sync( # noqa: C901\n self,\n call: execution.Call,\n client: client_lib.Inngest,\n ctx: Context,\n fn_id: str,\n middleware: middleware_lib.MiddlewareManager,\n target_hashed_id: str | None,\n ) -> execution.CallResult:\n def get_config(self, app_url: str) -> _Config:\n def get_id(self) -> str:\n def __init__(self, err: Exception) -> None:\ndef _remove_first_traceback_frame(err: Exception) -> None:"
},
{
"identifier": "net",
"path": "inngest/_internal/net.py",
"snippet": "def create_headers(\n framework: const.Framework | None,\n server_kind: const.ServerKind | None,\n) -> dict[str, str]:\ndef create_serve_url(\n *,\n request_url: str,\n serve_origin: str | None,\n serve_path: str | None,\n) -> str:\ndef normalize_headers(headers: dict[str, str]) -> dict[str, str]:\ndef parse_url(url: str) -> str:\n def __init__(\n self,\n body: bytes,\n headers: dict[str, str],\n is_production: bool,\n ) -> None:\n def validate(self, signing_key: str | None) -> types.MaybeError[None]:\nclass RequestSignature:"
},
{
"identifier": "transforms",
"path": "inngest/_internal/transforms.py",
"snippet": "def get_traceback(err: Exception) -> str:\ndef hash_signing_key(key: str) -> str:\ndef hash_step_id(step_id: str) -> str:\ndef dump_json(obj: object) -> types.MaybeError[str]:\ndef remove_signing_key_prefix(key: str) -> str:\ndef prep_body(obj: types.T) -> types.T:\n def second(cls, count: int = 1) -> int:\n def minute(cls, count: int = 1) -> int:\n def hour(cls, count: int = 1) -> int:\n def day(cls, count: int = 1) -> int:\n def week(cls, count: int = 1) -> int:\ndef to_duration_str(\n ms: int | datetime.timedelta,\n) -> types.MaybeError[str]:\ndef to_iso_utc(value: datetime.datetime) -> str:\ndef get_server_kind(\n headers: dict[str, str],\n) -> const.ServerKind | None | Exception:\nasync def maybe_await(\n value: types.T | typing.Awaitable[types.T],\n) -> types.T:\nclass _Duration:"
},
{
"identifier": "types",
"path": "inngest/_internal/types.py",
"snippet": "T = typing.TypeVar(\"T\")\nclass EmptySentinel:\nclass BaseModel(pydantic.BaseModel):\n def __init__(\n __pydantic_self__, # noqa: N805\n *args: object,\n **kwargs: object,\n ) -> None:\n def convert_validation_error(\n self,\n err: pydantic.ValidationError,\n ) -> BaseException:\n def from_raw(\n cls: type[BaseModelT],\n raw: object,\n ) -> BaseModelT | Exception:\n def to_dict(self) -> MaybeError[dict[str, object]]:"
}
] | import json
import flask
from ._internal import (
client_lib,
comm,
const,
errors,
execution,
function,
net,
transforms,
types,
) | 3,890 | """Flask integration for Inngest."""
FRAMEWORK = const.Framework.FLASK
def serve(
app: flask.Flask,
client: client_lib.Inngest,
functions: list[function.Function],
*,
serve_origin: str | None = None,
serve_path: str | None = None,
) -> None:
"""
Serve Inngest functions in a Flask app.
Args:
----
app: Flask app.
client: Inngest client.
functions: List of functions to serve.
serve_origin: Origin to serve the functions from.
serve_path: Path to serve the functions from.
"""
| """Flask integration for Inngest."""
FRAMEWORK = const.Framework.FLASK
def serve(
app: flask.Flask,
client: client_lib.Inngest,
functions: list[function.Function],
*,
serve_origin: str | None = None,
serve_path: str | None = None,
) -> None:
"""
Serve Inngest functions in a Flask app.
Args:
----
app: Flask app.
client: Inngest client.
functions: List of functions to serve.
serve_origin: Origin to serve the functions from.
serve_path: Path to serve the functions from.
"""
| handler = comm.CommHandler( | 1 | 2023-10-19 01:02:30+00:00 | 8k |
skleee/GLEN | src/tevatron/modeling/glen_t5_modeling.py | [
{
"identifier": "T5Config",
"path": "src/tevatron/modeling/glen_t5_config.py",
"snippet": "class T5Config(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a :class:`~transformers.T5Model` or a\n :class:`~transformers.TFT5Model`. It is used to instantiate a T5 model according to the specified\n arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar\n configuration to that of the T5 `t5-small <https://huggingface.co/t5-small>`__ architecture.\n\n Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used\n to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`\n for more information.\n\n Arguments:\n vocab_size (:obj:`int`, `optional`, defaults to 32128):\n Vocabulary size of the T5 model. Defines the number of different tokens that can be represented by the\n :obj:`inputs_ids` passed when calling :class:`~transformers.T5Model` or\n :class:`~transformers.TFT5Model`.\n n_positions (:obj:`int`, `optional`, defaults to 512):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n d_model (:obj:`int`, `optional`, defaults to 512):\n Size of the encoder layers and the pooler layer.\n d_kv (:obj:`int`, `optional`, defaults to 64):\n Size of the key, query, value projections per attention head. :obj:`d_kv` has to be equal to\n :obj:`d_model // num_heads`.\n d_ff (:obj:`int`, `optional`, defaults to 2048):\n Size of the intermediate feed forward layer in each :obj:`T5Block`.\n num_layers (:obj:`int`, `optional`, defaults to 6):\n Number of hidden layers in the Transformer encoder.\n num_decoder_layers (:obj:`int`, `optional`):\n Number of hidden layers in the Transformer decoder. Will use the same value as :obj:`num_layers` if not set.\n num_heads (:obj:`int`, `optional`, defaults to 8):\n Number of attention heads for each attention layer in\n the Transformer encoder.\n relative_attention_num_buckets (:obj:`int`, `optional`, defaults to 32):\n The number of buckets to use for each attention layer.\n dropout_rate (:obj:`float`, `optional`, defaults to 0.1):\n The ratio for all dropout layers.\n layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-6):\n The epsilon used by the layer normalization layers.\n initializer_factor (:obj:`float`, `optional`, defaults to 1):\n A factor for initializing all weight matrices (should be kept to 1, used internally for initialization\n testing).\n \"\"\"\n model_type = \"t5\"\n\n def __init__(\n self,\n vocab_size=32128,\n n_positions=512,\n d_model=512,\n d_kv=64,\n d_ff=2048,\n num_layers=6,\n num_decoder_layers=None,\n num_heads=8,\n relative_attention_num_buckets=32,\n dropout_rate=0.1,\n layer_norm_epsilon=1e-6,\n initializer_factor=1.0,\n is_encoder_decoder=True,\n pad_token_id=0,\n eos_token_id=1,\n **kwargs\n ):\n super().__init__(\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n is_encoder_decoder=is_encoder_decoder,\n **kwargs,\n )\n self.vocab_size = vocab_size\n self.n_positions = n_positions\n self.d_model = d_model\n self.d_kv = d_kv\n self.d_ff = d_ff\n self.num_layers = num_layers\n self.num_decoder_layers = (\n num_decoder_layers if num_decoder_layers is not None else self.num_layers\n ) # default = symmetry\n self.num_heads = num_heads\n self.relative_attention_num_buckets = relative_attention_num_buckets\n self.dropout_rate = dropout_rate\n self.layer_norm_epsilon = layer_norm_epsilon\n self.initializer_factor = initializer_factor\n\n @property\n def max_position_embeddings(self):\n return self.n_positions\n\n @property\n def hidden_size(self):\n return self.d_model\n\n @property\n def num_attention_heads(self):\n return self.num_heads\n\n @property\n def num_hidden_layers(self):\n return self.num_layers"
},
{
"identifier": "BaseModelOutputWithPast",
"path": "src/tevatron/modeling/glen_t5_outputs.py",
"snippet": "class BaseModelOutputWithPast(ModelOutput):\n \"\"\"\n Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).\n\n Args:\n last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n\n If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape\n :obj:`(batch_size, 1, hidden_size)` is output.\n past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):\n List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape\n :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`).\n\n Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see\n :obj:`past_key_values` input) to speed up sequential decoding.\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n last_hidden_state: torch.FloatTensor\n past_key_values: Optional[List[torch.FloatTensor]] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None"
},
{
"identifier": "Seq2SeqModelOutput",
"path": "src/tevatron/modeling/glen_t5_outputs.py",
"snippet": "class Seq2SeqModelOutput(ModelOutput):\n \"\"\"\n Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential\n decoding.\n\n Args:\n last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the decoder of the model.\n\n If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size, 1, hidden_size)` is output.\n past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):\n List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape\n :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`).\n\n Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be\n used (see :obj:`past_key_values` input) to speed up sequential decoding.\n decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.\n decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the\n self-attention heads.\n encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder of the model.\n encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.\n encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the\n self-attention heads.\n \"\"\"\n\n last_hidden_state: torch.FloatTensor\n past_key_values: Optional[List[torch.FloatTensor]] = None\n decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None\n encoder_last_hidden_state: Optional[torch.FloatTensor] = None\n encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None"
}
] | import copy
import math
import os
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
import re
import numpy as np
import tensorflow as tf
from transformers import logging
from transformers.modeling_utils import (
PreTrainedModel,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.file_utils import (
DUMMY_INPUTS,
DUMMY_MASK,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from .glen_t5_config import T5Config
from .glen_t5_outputs import BaseModelOutputWithPast, Seq2SeqModelOutput
from tevatron.modeling import (
T5ForConditionalGeneration_GLEN as T5ForConditionalGeneration,
) | 5,751 | `T5 Training <./t5.html#training>`__.
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Provide for sequence to sequence training. T5 uses the :obj:`pad_token_id` as the starting token for
:obj:`decoder_input_ids` generation.
If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see
:obj:`past_key_values`).
To know more on how to prepare :obj:`decoder_input_ids` for pretraining take a look at
`T5 Training <./t5.html#training>`__. If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both
unset, :obj:`decoder_input_ids` takes the value of :obj:`input_ids`.
decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`):
Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
also be used by default.
encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):
Tuple consists of (:obj:`last_hidden_state`, :obj:`optional`: `hidden_states`, :obj:`optional`: `attentions`)
:obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)` is a sequence of
hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded
representation.
If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds` have to be input
(see :obj:`past_key_values`).
This is useful if you want more control over how to convert :obj:`decoder_input_ids` indices into
associated vectors than the model's internal embedding lookup matrix.
If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both
unset, :obj:`decoder_inputs_embeds` takes the value of :obj:`inputs_embeds`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare T5 Model transformer outputting raw hidden-states"
"without any specific head on top.",
T5_START_DOCSTRING,
)
class T5Model(T5PreTrainedModel):
def __init__(self, config: T5Config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
if self.multiple_decoder:
self.decoder_list = []
for i in range(self.decoder_num):
self.decoder_list.append(T5Stack(decoder_config, self.shared))
else:
self.decoder = T5Stack(decoder_config, self.shared)
self.init_weights()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def _prune_heads(self, heads_to_prune):
"""Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
@replace_return_docstrings(
| # coding=utf-8
# Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch T5 model. """
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
####################################################
# This dict contrains shortcut names and associated url
# for the pretrained weights provided with the models
####################################################
T5_PRETRAINED_MODEL_ARCHIVE_LIST = [
"t5-small",
"t5-base",
"t5-large",
"t5-3b",
"t5-11b",
# See all T5 models at https://huggingface.co/models?filter=t5
]
####################################################
# This is a conversion method from TF 1.0 to PyTorch
# More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28
####################################################
def load_tf_weights_in_t5(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
tf_weights = {}
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
tf_weights[name] = array
for txt_name in names:
name = txt_name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(
n
in [
"adam_v",
"adam_m",
"AdamWeightDecayOptimizer",
"AdamWeightDecayOptimizer_1",
"global_step",
]
for n in name
):
logger.info("Skipping {}".format("/".join(name)))
tf_weights.pop(txt_name, None)
continue
if "_slot_" in name[-1]:
logger.info("Skipping {}".format("/".join(name)))
tf_weights.pop(txt_name, None)
continue
pointer = model
array = tf_weights[txt_name]
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
# elif scope_names[0] == 'scale':
# pointer = getattr(pointer, 'weight')
# elif scope_names[0] == 'output_bias' or scope_names[0] == 'beta':
# pointer = getattr(pointer, 'bias')
# elif scope_names[0] == 'squad':
# pointer = getattr(pointer, 'classifier')
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if scope_names[0] not in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
if scope_names[0] != "embedding":
logger.info(
"Transposing numpy weight of shape {} for {}".format(array.shape, name)
)
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array.astype(np.float32))
tf_weights.pop(txt_name, None)
logger.info(
"Weights not copied to PyTorch model: {}".format(", ".join(tf_weights.keys()))
)
# logger.info("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys())))
return model
####################################################
# PyTorch Models are constructed by sub-classing
# - torch.nn.Module for the layers and
# - PreTrainedModel for the models (it-self a sub-class of torch.nn.Module)
####################################################
class T5LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""Construct a layernorm module in the T5 style
No bias and no substraction of mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
# layer norm should always be calculated in float32
variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
x = x / torch.sqrt(variance + self.variance_epsilon)
if self.weight.dtype == torch.float16:
x = x.to(torch.float16)
return self.weight * x
class T5DenseReluDense(nn.Module):
def __init__(self, config):
super().__init__()
self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
h = self.wi(hidden_states)
h = F.relu(h)
h = self.dropout(h)
h = self.wo(h)
return h
class T5LayerFF(nn.Module):
def __init__(self, config):
super().__init__()
self.DenseReluDense = T5DenseReluDense(config)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
norm_x = self.layer_norm(hidden_states)
y = self.DenseReluDense(norm_x)
layer_output = hidden_states + self.dropout(y)
return layer_output
class T5Attention(nn.Module):
def __init__(
self,
config: T5Config,
has_relative_attention_bias=False,
is_bidirectional=False,
):
super().__init__()
self.is_bidirectional = is_bidirectional
self.is_decoder = config.is_decoder
self.has_relative_attention_bias = has_relative_attention_bias
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.d_model = config.d_model
self.d_kv = config.d_kv
self.n_heads = config.num_heads
self.dropout = config.dropout_rate
self.inner_dim = self.n_heads * self.d_kv
# Mesh TensorFlow initialization to avoid scaling before softmax
self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(
self.relative_attention_num_buckets, self.n_heads
)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.n_heads, self.d_kv, self.pruned_heads
)
# Prune linear layers
self.q = prune_linear_layer(self.q, index)
self.k = prune_linear_layer(self.k, index)
self.v = prune_linear_layer(self.v, index)
self.o = prune_linear_layer(self.o, index, dim=1)
# Update hyper params
self.n_heads = self.n_heads - len(heads)
self.inner_dim = self.d_kv * self.n_heads
self.pruned_heads = self.pruned_heads.union(heads)
@staticmethod
def _relative_position_bucket(
relative_position, bidirectional=True, num_buckets=32, max_distance=128
):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention.
The relative position is defined as memory_position - query_position, i.e.
the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are
invalid.
We use smaller buckets for small absolute relative_position and larger buckets
for larger absolute relative_positions. All relative positions >=max_distance
map to the same bucket. All relative positions <=-max_distance map to the
same bucket. This should allow for more graceful generalization to longer
sequences than the model has been trained on.
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32
values in the range [0, num_buckets)
"""
ret = 0
n = -relative_position
if bidirectional:
num_buckets //= 2
ret += (n < 0).to(
torch.long
) * num_buckets # mtf.to_int32(mtf.less(n, 0)) * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
# now n is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = n < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
val_if_large = max_exact + (
torch.log(n.float() / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact)
).to(torch.long)
val_if_large = torch.min(
val_if_large, torch.full_like(val_if_large, num_buckets - 1)
)
ret += torch.where(is_small, n, val_if_large)
return ret
def compute_bias(self, qlen, klen):
"""Compute binned relative position bias"""
context_position = torch.arange(qlen, dtype=torch.long)[:, None]
memory_position = torch.arange(klen, dtype=torch.long)[None, :]
relative_position = memory_position - context_position # shape (qlen, klen)
rp_bucket = self._relative_position_bucket(
relative_position, # shape (qlen, klen)
bidirectional=self.is_bidirectional,
num_buckets=self.relative_attention_num_buckets,
)
rp_bucket = rp_bucket.to(self.relative_attention_bias.weight.device)
values = self.relative_attention_bias(
rp_bucket
) # shape (qlen, klen, num_heads)
values = values.permute([2, 0, 1]).unsqueeze(
0
) # shape (1, num_heads, qlen, klen)
return values
def forward(
self,
input,
mask=None,
kv=None,
position_bias=None,
past_key_value=None,
head_mask=None,
query_length=None,
use_cache=False,
output_attentions=False,
):
"""
Self-attention (if kv is None) or attention over source sentence (provided by kv).
"""
# Input is (bs, qlen, dim)
# Mask is (bs, klen) (non-causal) or (bs, klen, klen)
# past_key_value[0] is (bs, n_heads, q_len - 1, dim_per_head)
bs, qlen, dim = input.size()
if past_key_value is not None:
assert self.is_decoder is True, "Encoder cannot cache past key value states"
assert (
len(past_key_value) == 2
), "past_key_value should have 2 past states: keys and values. Got {} past states".format(
len(past_key_value)
)
real_qlen = (
qlen + past_key_value[0].shape[2]
if query_length is None
else query_length
)
else:
real_qlen = qlen
if kv is None:
klen = real_qlen
else:
klen = kv.size(1)
def shape(x):
"""projection"""
return x.view(bs, -1, self.n_heads, self.d_kv).transpose(1, 2)
def unshape(x):
"""compute context"""
return x.transpose(1, 2).contiguous().view(bs, -1, self.inner_dim)
q = shape(self.q(input)) # (bs, n_heads, qlen, dim_per_head)
if kv is None:
k = shape(self.k(input)) # (bs, n_heads, qlen, dim_per_head)
v = shape(self.v(input)) # (bs, n_heads, qlen, dim_per_head)
elif past_key_value is None:
k = v = kv
k = shape(self.k(k)) # (bs, n_heads, qlen, dim_per_head)
v = shape(self.v(v)) # (bs, n_heads, qlen, dim_per_head)
if past_key_value is not None:
if kv is None:
k_, v_ = past_key_value
k = torch.cat([k_, k], dim=2) # (bs, n_heads, klen, dim_per_head)
v = torch.cat([v_, v], dim=2) # (bs, n_heads, klen, dim_per_head)
else:
k, v = past_key_value
if self.is_decoder and use_cache is True:
present_key_value_state = ((k, v),)
else:
present_key_value_state = (None,)
# (bs, n_heads, qlen, klen)
scores = torch.matmul(
q, k.transpose(3, 2)
) # equivalent of torch.einsum("bnqd,bnkd->bnqk", q, k), compatible with onnx op>9
if position_bias is None:
if not self.has_relative_attention_bias:
raise ValueError(
"No position_bias provided and no weights to compute position_bias"
)
position_bias = self.compute_bias(real_qlen, klen)
# if key and values are already calculated
# we want only the last query position bias
if past_key_value is not None:
position_bias = position_bias[:, :, -qlen:, :]
if mask is not None:
position_bias = position_bias + mask # (bs, n_heads, qlen, klen)
scores += position_bias
weights = F.softmax(scores.float(), dim=-1).type_as(
scores
) # (bs, n_heads, qlen, klen)
weights = F.dropout(
weights, p=self.dropout, training=self.training
) # (bs, n_heads, qlen, klen)
# Mask heads if we want to
if head_mask is not None:
weights = weights * head_mask
context = torch.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)
context = unshape(context) # (bs, qlen, dim)
context = self.o(context)
outputs = (context,) + present_key_value_state
if output_attentions:
outputs = outputs + (weights,)
if self.has_relative_attention_bias:
outputs = outputs + (position_bias,)
return outputs
class T5LayerSelfAttention(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.SelfAttention = T5Attention(
config,
has_relative_attention_bias=has_relative_attention_bias,
is_bidirectional=not config.is_decoder,
)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
):
norm_x = self.layer_norm(hidden_states)
attention_output = self.SelfAttention(
norm_x,
mask=attention_mask,
position_bias=position_bias,
head_mask=head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
y = attention_output[0]
layer_output = hidden_states + self.dropout(y)
outputs = (layer_output,) + attention_output[
1:
] # add attentions if we output them
return outputs
class T5LayerCrossAttention(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.EncDecAttention = T5Attention(
config,
has_relative_attention_bias=has_relative_attention_bias,
is_bidirectional=True,
)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
kv,
attention_mask=None,
position_bias=None,
head_mask=None,
past_key_value=None,
use_cache=False,
query_length=None,
output_attentions=False,
):
norm_x = self.layer_norm(hidden_states)
attention_output = self.EncDecAttention(
norm_x,
mask=attention_mask,
kv=kv,
position_bias=position_bias,
head_mask=head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
query_length=query_length,
output_attentions=output_attentions,
)
y = attention_output[0]
layer_output = hidden_states + self.dropout(y)
outputs = (layer_output,) + attention_output[
1:
] # add attentions if we output them
return outputs
class T5Block(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.is_decoder = config.is_decoder
self.layer = nn.ModuleList()
self.layer.append(
T5LayerSelfAttention(
config, has_relative_attention_bias=has_relative_attention_bias
)
)
if self.is_decoder:
self.layer.append(
T5LayerCrossAttention(
config, has_relative_attention_bias=has_relative_attention_bias
)
)
self.layer.append(T5LayerFF(config))
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
encoder_decoder_position_bias=None,
head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
):
if past_key_value is not None:
assert self.is_decoder, "Only decoder can use `past_key_values`"
expected_num_past_key_values = 2 if encoder_hidden_states is None else 4
error_message = "There should be {} past states. 2 (past / key) for self attention.{} Got {} past key / value states".format(
expected_num_past_key_values,
"2 (past / key) for cross attention"
if expected_num_past_key_values == 4
else "",
len(past_key_value),
)
assert len(past_key_value) == expected_num_past_key_values, error_message
self_attn_past_key_value = past_key_value[:2]
cross_attn_past_key_value = past_key_value[2:]
else:
self_attn_past_key_value, cross_attn_past_key_value = None, None
self_attention_outputs = self.layer[0](
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
head_mask=head_mask,
past_key_value=self_attn_past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states, present_key_value_state = self_attention_outputs[:2]
attention_outputs = self_attention_outputs[
2:
] # Keep self-attention outputs and relative position weights
if self.is_decoder and encoder_hidden_states is not None:
# the actual query length is unknown for cross attention
# if using past key value states. Need to inject it here
if present_key_value_state is not None:
query_length = present_key_value_state[0].shape[2]
else:
query_length = None
cross_attention_outputs = self.layer[1](
hidden_states,
kv=encoder_hidden_states,
attention_mask=encoder_attention_mask,
position_bias=encoder_decoder_position_bias,
head_mask=head_mask,
past_key_value=cross_attn_past_key_value,
query_length=query_length,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = cross_attention_outputs[0]
# Combine self attn and cross attn key value states
if present_key_value_state is not None:
present_key_value_state = (
present_key_value_state + cross_attention_outputs[1]
)
# Keep cross-attention outputs and relative position weights
attention_outputs = attention_outputs + cross_attention_outputs[2:]
# Apply Feed Forward layer
hidden_states = self.layer[-1](hidden_states)
outputs = (hidden_states,)
# Add attentions if we output them
outputs = outputs + (present_key_value_state,) + attention_outputs
return outputs # hidden-states, present_key_value_states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)
class T5PreTrainedModel(PreTrainedModel):
"""An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = T5Config
load_tf_weights = load_tf_weights_in_t5
base_model_prefix = "transformer"
@property
def dummy_inputs(self):
input_ids = torch.tensor(DUMMY_INPUTS)
input_mask = torch.tensor(DUMMY_MASK)
dummy_inputs = {
"decoder_input_ids": input_ids,
"input_ids": input_ids,
"decoder_attention_mask": input_mask,
}
return dummy_inputs
def _init_weights(self, module):
"""Initialize the weights"""
factor = (
self.config.initializer_factor
) # Used for testing weights initialization
if isinstance(module, T5LayerNorm):
module.weight.data.fill_(factor * 1.0)
elif isinstance(module, (T5Model, T5ForConditionalGeneration)):
# Mesh TensorFlow embeddings initialization
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
elif isinstance(module, T5DenseReluDense):
# Mesh TensorFlow FF initialization
# See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56
# and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89
module.wi.weight.data.normal_(
mean=0.0, std=factor * ((self.config.d_model) ** -0.5)
)
if hasattr(module.wi, "bias") and module.wi.bias is not None:
module.wi.bias.data.zero_()
module.wo.weight.data.normal_(
mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)
)
if hasattr(module.wo, "bias") and module.wo.bias is not None:
module.wo.bias.data.zero_()
elif isinstance(module, T5Attention):
# Mesh TensorFlow attention initialization to avoid scaling before softmax
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
d_model = self.config.d_model
d_kv = self.config.d_kv
n_heads = self.config.num_heads
module.q.weight.data.normal_(
mean=0.0, std=factor * ((d_model * d_kv) ** -0.5)
)
module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
module.o.weight.data.normal_(
mean=0.0, std=factor * ((n_heads * d_kv) ** -0.5)
)
if module.has_relative_attention_bias:
module.relative_attention_bias.weight.data.normal_(
mean=0.0, std=factor * ((d_model) ** -0.5)
)
def _shift_right(self, input_ids):
decoder_start_token_id = self.config.decoder_start_token_id
pad_token_id = self.config.pad_token_id
assert (
decoder_start_token_id is not None
), "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. See T5 docs for more information"
# shift inputs to the right
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
assert (
pad_token_id is not None
), "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
assert torch.all(
shifted_input_ids >= 0
).item(), "Verify that `shifted_input_ids` has only positive values"
return shifted_input_ids
class T5Stack(T5PreTrainedModel):
def __init__(self, config, embed_tokens=None):
super().__init__(config)
self.embed_tokens = embed_tokens
self.is_decoder = config.is_decoder
self.block = nn.ModuleList(
[
T5Block(config, has_relative_attention_bias=bool(i == 0))
for i in range(config.num_layers)
]
)
self.final_layer_norm = T5LayerNorm(
config.d_model, eps=config.layer_norm_epsilon
)
self.dropout = nn.Dropout(config.dropout_rate)
self.init_weights()
def get_input_embeddings(self):
return self.embed_tokens
def get_output_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, new_embeddings):
self.embed_tokens = new_embeddings
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
inputs_embeds=None,
head_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
if input_ids is not None and inputs_embeds is not None:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(
f"You cannot specify both {err_msg_prefix}inputs and {err_msg_prefix}inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(
f"You have to specify either {err_msg_prefix}inputs or {err_msg_prefix}inputs_embeds"
)
if inputs_embeds is None:
assert (
self.embed_tokens is not None
), "You have to intialize the model with valid token embeddings"
if self.training and self.is_decoder and len(input_ids) == 2:
inputs_embeds = self.embed_tokens(input_ids[0])
else:
inputs_embeds = self.embed_tokens(input_ids)
batch_size, seq_length = input_shape
# required mask seq length can be calculated via length of past
mask_seq_length = (
past_key_values[0][0].shape[2] + seq_length
if past_key_values is not None
else seq_length
)
if use_cache is True:
assert (
self.is_decoder
), ":obj:`use_cache` can only be set to `True` if {} is used as a decoder".format(
self
)
if attention_mask is None:
attention_mask = torch.ones(batch_size, mask_seq_length).to(
inputs_embeds.device
)
if (
self.is_decoder
and encoder_attention_mask is None
and encoder_hidden_states is not None
):
encoder_seq_length = encoder_hidden_states.shape[1]
encoder_attention_mask = torch.ones(
batch_size,
encoder_seq_length,
device=inputs_embeds.device,
dtype=torch.long,
)
# initialize past_key_values with `None` if past does not exist
if past_key_values is None:
past_key_values = [None] * len(self.block)
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask = self.get_extended_attention_mask(
attention_mask, input_shape, inputs_embeds.device
)
# print(f"extended_attention_mask: {extended_attention_mask.shape}")
if self.is_decoder and encoder_attention_mask is not None:
encoder_extended_attention_mask = self.invert_attention_mask(
encoder_attention_mask
)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
head_mask = self.get_head_mask(head_mask, self.config.num_layers)
present_key_value_states = () if use_cache else None
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
position_bias = None
encoder_decoder_position_bias = None
hidden_states = self.dropout(inputs_embeds)
for i, (layer_module, past_key_value) in enumerate(
zip(self.block, past_key_values)
):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states,
attention_mask=extended_attention_mask,
position_bias=position_bias,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
encoder_decoder_position_bias=encoder_decoder_position_bias,
head_mask=head_mask[i],
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
# layer_outputs is a tuple with:
# hidden-states, key-value-states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)
hidden_states, present_key_value_state = layer_outputs[:2]
if i == 0:
# We share the position biases between the layers - the first layer store them
# layer_outputs = hidden-states, key-value-states (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)
position_bias = layer_outputs[3 if output_attentions else 2]
if self.is_decoder and encoder_hidden_states is not None:
encoder_decoder_position_bias = layer_outputs[
5 if output_attentions else 3
]
# append next layer key value states
if use_cache:
present_key_value_states = present_key_value_states + (
present_key_value_state,
)
if output_attentions:
all_attentions = all_attentions + (
layer_outputs[2],
) # We keep only self-attention weights for now
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
present_key_value_states,
all_hidden_states,
all_attentions,
]
if v is not None
)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=present_key_value_states,
hidden_states=all_hidden_states,
attentions=all_attentions,
)
T5_START_DOCSTRING = r"""
The T5 model was proposed in `Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer
<https://arxiv.org/abs/1910.10683>`__ by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang,
Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.
It's an encoder decoder transformer pre-trained in a text-to-text denoising generative setting.
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.T5Config`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
T5_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right
and the left.
Indices can be obtained using :class:`~transformers.T5Tokenizer`.
See :meth:`transformers.PreTrainedTokenizer.encode` and
:meth:`transformers.PreTrainedTokenizer.__call__` for detail.
To know more on how to prepare :obj:`input_ids` for pretraining take a look a
`T5 Training <./t5.html#training>`__.
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Provide for sequence to sequence training. T5 uses the :obj:`pad_token_id` as the starting token for
:obj:`decoder_input_ids` generation.
If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see
:obj:`past_key_values`).
To know more on how to prepare :obj:`decoder_input_ids` for pretraining take a look at
`T5 Training <./t5.html#training>`__. If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both
unset, :obj:`decoder_input_ids` takes the value of :obj:`input_ids`.
decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`):
Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
also be used by default.
encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):
Tuple consists of (:obj:`last_hidden_state`, :obj:`optional`: `hidden_states`, :obj:`optional`: `attentions`)
:obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)` is a sequence of
hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded
representation.
If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds` have to be input
(see :obj:`past_key_values`).
This is useful if you want more control over how to convert :obj:`decoder_input_ids` indices into
associated vectors than the model's internal embedding lookup matrix.
If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both
unset, :obj:`decoder_inputs_embeds` takes the value of :obj:`inputs_embeds`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare T5 Model transformer outputting raw hidden-states"
"without any specific head on top.",
T5_START_DOCSTRING,
)
class T5Model(T5PreTrainedModel):
def __init__(self, config: T5Config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
if self.multiple_decoder:
self.decoder_list = []
for i in range(self.decoder_num):
self.decoder_list.append(T5Stack(decoder_config, self.shared))
else:
self.decoder = T5Stack(decoder_config, self.shared)
self.init_weights()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def _prune_heads(self, heads_to_prune):
"""Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
@replace_return_docstrings( | output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC | 2 | 2023-10-18 09:40:42+00:00 | 8k |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.